1 | // required for old g++ to compile PRId64 macros, see |
2 | // https://github.com/pytorch/pytorch/issues/3571 |
3 | // for context |
4 | #ifndef __STDC_FORMAT_MACROS |
5 | #define __STDC_FORMAT_MACROS |
6 | #endif |
7 | |
// An external backend might generate files within its code tree
// and check all the source files within the tree with clang-format.
// So, disable it here, since the backend might have a different config.
11 | // clang-format off |
12 | |
13 | // NOTE: This condition is true for all PyTorch internal libraries, it |
14 | // just excludes external projects such as torch_xla which |
15 | // re-use some of the PyTorch codegen machinery. |
16 | #if defined(CAFFE2_BUILD_MAIN_LIB) || \ |
17 | defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ |
18 | defined(TORCH_HIP_BUILD_MAIN_LIB) || \ |
19 | defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ |
20 | defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) |
21 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
22 | #endif |
23 | |
24 | // @generated by torchgen/gen.py from RegisterDispatchKey.cpp |
25 | |
26 | #include <c10/core/TensorImpl.h> |
27 | #include <c10/core/Allocator.h> |
28 | #include <ATen/DeviceGuard.h> |
29 | #include <ATen/NamedTensorUtils.h> |
30 | #include <ATen/Utils.h> |
31 | #include <ATen/WrapDimUtils.h> |
32 | #include <ATen/Dispatch.h> |
33 | #include <c10/util/ExclusivelyOwned.h> |
34 | #include <c10/util/Half.h> |
35 | #include <c10/core/UndefinedTensorImpl.h> |
36 | #include <c10/util/Optional.h> |
37 | #include <ATen/Tensor.h> |
38 | #include <ATen/native/Resize.h> |
39 | |
40 | #include <cstddef> |
41 | #include <functional> |
42 | #include <memory> |
43 | #include <utility> |
44 | |
45 | #include <ATen/Config.h> |
46 | #include <ATen/core/op_registration/adaption.h> |
47 | #include <torch/library.h> |
48 | |
49 | |
50 | #include <ATen/ops/as_strided_native.h> |
51 | #include <ATen/ops/empty.h> |
52 | #include <ATen/ops/empty_strided.h> |
53 | #include <ATen/ops/_copy_from_and_resize.h> |
54 | #include <ATen/ops/_copy_from.h> |
55 | #include <ATen/ops/_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h> |
56 | #include <ATen/ops/_adaptive_avg_pool2d_backward_native.h> |
57 | #include <ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h> |
58 | #include <ATen/ops/_adaptive_avg_pool2d_native.h> |
59 | #include <ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h> |
60 | #include <ATen/ops/_adaptive_avg_pool3d_backward_native.h> |
61 | #include <ATen/ops/_adaptive_avg_pool3d_compositeexplicitautograd_dispatch.h> |
62 | #include <ATen/ops/_adaptive_avg_pool3d_native.h> |
63 | #include <ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h> |
64 | #include <ATen/ops/_add_relu_native.h> |
65 | #include <ATen/ops/_aminmax_compositeexplicitautograd_dispatch.h> |
66 | #include <ATen/ops/_aminmax_native.h> |
67 | #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_compositeexplicitautograd_dispatch.h> |
68 | #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h> |
69 | #include <ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h> |
70 | #include <ATen/ops/_amp_update_scale_native.h> |
71 | #include <ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h> |
72 | #include <ATen/ops/_cdist_backward_native.h> |
73 | #include <ATen/ops/_cdist_forward_compositeexplicitautograd_dispatch.h> |
74 | #include <ATen/ops/_cdist_forward_native.h> |
75 | #include <ATen/ops/_cholesky_solve_helper_compositeexplicitautograd_dispatch.h> |
76 | #include <ATen/ops/_cholesky_solve_helper_native.h> |
77 | #include <ATen/ops/_coalesce_compositeexplicitautograd_dispatch.h> |
78 | #include <ATen/ops/_coalesce_native.h> |
79 | #include <ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h> |
80 | #include <ATen/ops/_coalesced_native.h> |
81 | #include <ATen/ops/_conj_compositeexplicitautograd_dispatch.h> |
82 | #include <ATen/ops/_conj_copy_compositeexplicitautograd_dispatch.h> |
83 | #include <ATen/ops/_conj_copy_native.h> |
84 | #include <ATen/ops/_conj_native.h> |
85 | #include <ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h> |
86 | #include <ATen/ops/_conj_physical_native.h> |
87 | #include <ATen/ops/_convolution_compositeexplicitautograd_dispatch.h> |
88 | #include <ATen/ops/_convolution_native.h> |
89 | #include <ATen/ops/_copy_from_and_resize_compositeexplicitautograd_dispatch.h> |
90 | #include <ATen/ops/_copy_from_and_resize_native.h> |
91 | #include <ATen/ops/_copy_from_compositeexplicitautograd_dispatch.h> |
92 | #include <ATen/ops/_copy_from_native.h> |
93 | #include <ATen/ops/_ctc_loss_backward_compositeexplicitautograd_dispatch.h> |
94 | #include <ATen/ops/_ctc_loss_backward_native.h> |
95 | #include <ATen/ops/_ctc_loss_compositeexplicitautograd_dispatch.h> |
96 | #include <ATen/ops/_ctc_loss_native.h> |
97 | #include <ATen/ops/_cudnn_ctc_loss_compositeexplicitautograd_dispatch.h> |
98 | #include <ATen/ops/_cudnn_ctc_loss_native.h> |
99 | #include <ATen/ops/_cudnn_init_dropout_state_compositeexplicitautograd_dispatch.h> |
100 | #include <ATen/ops/_cudnn_init_dropout_state_native.h> |
101 | #include <ATen/ops/_cudnn_rnn_backward_compositeexplicitautograd_dispatch.h> |
102 | #include <ATen/ops/_cudnn_rnn_backward_native.h> |
103 | #include <ATen/ops/_cudnn_rnn_compositeexplicitautograd_dispatch.h> |
104 | #include <ATen/ops/_cudnn_rnn_flatten_weight_compositeexplicitautograd_dispatch.h> |
105 | #include <ATen/ops/_cudnn_rnn_flatten_weight_native.h> |
106 | #include <ATen/ops/_cudnn_rnn_native.h> |
107 | #include <ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h> |
108 | #include <ATen/ops/_dirichlet_grad_native.h> |
109 | #include <ATen/ops/_efficientzerotensor_compositeexplicitautograd_dispatch.h> |
110 | #include <ATen/ops/_efficientzerotensor_native.h> |
111 | #include <ATen/ops/_embedding_bag_compositeexplicitautograd_dispatch.h> |
112 | #include <ATen/ops/_embedding_bag_dense_backward_compositeexplicitautograd_dispatch.h> |
113 | #include <ATen/ops/_embedding_bag_dense_backward_native.h> |
114 | #include <ATen/ops/_embedding_bag_forward_only_compositeexplicitautograd_dispatch.h> |
115 | #include <ATen/ops/_embedding_bag_forward_only_native.h> |
116 | #include <ATen/ops/_embedding_bag_native.h> |
117 | #include <ATen/ops/_embedding_bag_per_sample_weights_backward_compositeexplicitautograd_dispatch.h> |
118 | #include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h> |
119 | #include <ATen/ops/_empty_affine_quantized_compositeexplicitautograd_dispatch.h> |
120 | #include <ATen/ops/_empty_affine_quantized_native.h> |
121 | #include <ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h> |
122 | #include <ATen/ops/_empty_per_channel_affine_quantized_native.h> |
123 | #include <ATen/ops/_euclidean_dist_compositeexplicitautograd_dispatch.h> |
124 | #include <ATen/ops/_euclidean_dist_native.h> |
125 | #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_compositeexplicitautograd_dispatch.h> |
126 | #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h> |
127 | #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_compositeexplicitautograd_dispatch.h> |
128 | #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h> |
129 | #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_compositeexplicitautograd_dispatch.h> |
130 | #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h> |
131 | #include <ATen/ops/_foobar_compositeexplicitautograd_dispatch.h> |
132 | #include <ATen/ops/_foobar_native.h> |
133 | #include <ATen/ops/_foreach_abs_compositeexplicitautograd_dispatch.h> |
134 | #include <ATen/ops/_foreach_abs_native.h> |
135 | #include <ATen/ops/_foreach_acos_compositeexplicitautograd_dispatch.h> |
136 | #include <ATen/ops/_foreach_acos_native.h> |
137 | #include <ATen/ops/_foreach_add_compositeexplicitautograd_dispatch.h> |
138 | #include <ATen/ops/_foreach_add_native.h> |
139 | #include <ATen/ops/_foreach_addcdiv_compositeexplicitautograd_dispatch.h> |
140 | #include <ATen/ops/_foreach_addcdiv_native.h> |
141 | #include <ATen/ops/_foreach_addcmul_compositeexplicitautograd_dispatch.h> |
142 | #include <ATen/ops/_foreach_addcmul_native.h> |
143 | #include <ATen/ops/_foreach_asin_compositeexplicitautograd_dispatch.h> |
144 | #include <ATen/ops/_foreach_asin_native.h> |
145 | #include <ATen/ops/_foreach_atan_compositeexplicitautograd_dispatch.h> |
146 | #include <ATen/ops/_foreach_atan_native.h> |
147 | #include <ATen/ops/_foreach_ceil_compositeexplicitautograd_dispatch.h> |
148 | #include <ATen/ops/_foreach_ceil_native.h> |
149 | #include <ATen/ops/_foreach_clamp_max_compositeexplicitautograd_dispatch.h> |
150 | #include <ATen/ops/_foreach_clamp_max_native.h> |
151 | #include <ATen/ops/_foreach_clamp_min_compositeexplicitautograd_dispatch.h> |
152 | #include <ATen/ops/_foreach_clamp_min_native.h> |
153 | #include <ATen/ops/_foreach_cos_compositeexplicitautograd_dispatch.h> |
154 | #include <ATen/ops/_foreach_cos_native.h> |
155 | #include <ATen/ops/_foreach_cosh_compositeexplicitautograd_dispatch.h> |
156 | #include <ATen/ops/_foreach_cosh_native.h> |
157 | #include <ATen/ops/_foreach_div_compositeexplicitautograd_dispatch.h> |
158 | #include <ATen/ops/_foreach_div_native.h> |
159 | #include <ATen/ops/_foreach_erf_compositeexplicitautograd_dispatch.h> |
160 | #include <ATen/ops/_foreach_erf_native.h> |
161 | #include <ATen/ops/_foreach_erfc_compositeexplicitautograd_dispatch.h> |
162 | #include <ATen/ops/_foreach_erfc_native.h> |
163 | #include <ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h> |
164 | #include <ATen/ops/_foreach_exp_native.h> |
165 | #include <ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h> |
166 | #include <ATen/ops/_foreach_expm1_native.h> |
167 | #include <ATen/ops/_foreach_floor_compositeexplicitautograd_dispatch.h> |
168 | #include <ATen/ops/_foreach_floor_native.h> |
169 | #include <ATen/ops/_foreach_frac_compositeexplicitautograd_dispatch.h> |
170 | #include <ATen/ops/_foreach_frac_native.h> |
171 | #include <ATen/ops/_foreach_lerp_compositeexplicitautograd_dispatch.h> |
172 | #include <ATen/ops/_foreach_lerp_native.h> |
173 | #include <ATen/ops/_foreach_lgamma_compositeexplicitautograd_dispatch.h> |
174 | #include <ATen/ops/_foreach_lgamma_native.h> |
175 | #include <ATen/ops/_foreach_log10_compositeexplicitautograd_dispatch.h> |
176 | #include <ATen/ops/_foreach_log10_native.h> |
177 | #include <ATen/ops/_foreach_log1p_compositeexplicitautograd_dispatch.h> |
178 | #include <ATen/ops/_foreach_log1p_native.h> |
179 | #include <ATen/ops/_foreach_log2_compositeexplicitautograd_dispatch.h> |
180 | #include <ATen/ops/_foreach_log2_native.h> |
181 | #include <ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h> |
182 | #include <ATen/ops/_foreach_log_native.h> |
183 | #include <ATen/ops/_foreach_maximum_compositeexplicitautograd_dispatch.h> |
184 | #include <ATen/ops/_foreach_maximum_native.h> |
185 | #include <ATen/ops/_foreach_minimum_compositeexplicitautograd_dispatch.h> |
186 | #include <ATen/ops/_foreach_minimum_native.h> |
187 | #include <ATen/ops/_foreach_mul_compositeexplicitautograd_dispatch.h> |
188 | #include <ATen/ops/_foreach_mul_native.h> |
189 | #include <ATen/ops/_foreach_neg_compositeexplicitautograd_dispatch.h> |
190 | #include <ATen/ops/_foreach_neg_native.h> |
191 | #include <ATen/ops/_foreach_norm_compositeexplicitautograd_dispatch.h> |
192 | #include <ATen/ops/_foreach_norm_native.h> |
193 | #include <ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h> |
194 | #include <ATen/ops/_foreach_reciprocal_native.h> |
195 | #include <ATen/ops/_foreach_round_compositeexplicitautograd_dispatch.h> |
196 | #include <ATen/ops/_foreach_round_native.h> |
197 | #include <ATen/ops/_foreach_sigmoid_compositeexplicitautograd_dispatch.h> |
198 | #include <ATen/ops/_foreach_sigmoid_native.h> |
199 | #include <ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h> |
200 | #include <ATen/ops/_foreach_sin_native.h> |
201 | #include <ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h> |
202 | #include <ATen/ops/_foreach_sinh_native.h> |
203 | #include <ATen/ops/_foreach_sqrt_compositeexplicitautograd_dispatch.h> |
204 | #include <ATen/ops/_foreach_sqrt_native.h> |
205 | #include <ATen/ops/_foreach_sub_compositeexplicitautograd_dispatch.h> |
206 | #include <ATen/ops/_foreach_sub_native.h> |
207 | #include <ATen/ops/_foreach_tan_compositeexplicitautograd_dispatch.h> |
208 | #include <ATen/ops/_foreach_tan_native.h> |
209 | #include <ATen/ops/_foreach_tanh_compositeexplicitautograd_dispatch.h> |
210 | #include <ATen/ops/_foreach_tanh_native.h> |
211 | #include <ATen/ops/_foreach_trunc_compositeexplicitautograd_dispatch.h> |
212 | #include <ATen/ops/_foreach_trunc_native.h> |
213 | #include <ATen/ops/_foreach_zero_compositeexplicitautograd_dispatch.h> |
214 | #include <ATen/ops/_foreach_zero_native.h> |
215 | #include <ATen/ops/_fused_adam_compositeexplicitautograd_dispatch.h> |
216 | #include <ATen/ops/_fused_adam_native.h> |
217 | #include <ATen/ops/_fused_adamw_compositeexplicitautograd_dispatch.h> |
218 | #include <ATen/ops/_fused_adamw_native.h> |
219 | #include <ATen/ops/_fused_dropout_compositeexplicitautograd_dispatch.h> |
220 | #include <ATen/ops/_fused_dropout_native.h> |
221 | #include <ATen/ops/_fused_moving_avg_obs_fq_helper_compositeexplicitautograd_dispatch.h> |
222 | #include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h> |
223 | #include <ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h> |
224 | #include <ATen/ops/_fw_primal_copy_compositeexplicitautograd_dispatch.h> |
225 | #include <ATen/ops/_fw_primal_copy_native.h> |
226 | #include <ATen/ops/_fw_primal_native.h> |
227 | #include <ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h> |
228 | #include <ATen/ops/_grid_sampler_2d_cpu_fallback_native.h> |
229 | #include <ATen/ops/_has_same_storage_numel_compositeexplicitautograd_dispatch.h> |
230 | #include <ATen/ops/_has_same_storage_numel_native.h> |
231 | #include <ATen/ops/_histogramdd_bin_edges_compositeexplicitautograd_dispatch.h> |
232 | #include <ATen/ops/_histogramdd_bin_edges_native.h> |
233 | #include <ATen/ops/_histogramdd_from_bin_cts_compositeexplicitautograd_dispatch.h> |
234 | #include <ATen/ops/_histogramdd_from_bin_cts_native.h> |
235 | #include <ATen/ops/_histogramdd_from_bin_tensors_compositeexplicitautograd_dispatch.h> |
236 | #include <ATen/ops/_histogramdd_from_bin_tensors_native.h> |
237 | #include <ATen/ops/_index_put_impl_compositeexplicitautograd_dispatch.h> |
238 | #include <ATen/ops/_index_put_impl_native.h> |
239 | #include <ATen/ops/_indices_copy_compositeexplicitautograd_dispatch.h> |
240 | #include <ATen/ops/_indices_copy_native.h> |
241 | #include <ATen/ops/_is_all_true_compositeexplicitautograd_dispatch.h> |
242 | #include <ATen/ops/_is_all_true_native.h> |
243 | #include <ATen/ops/_is_any_true_compositeexplicitautograd_dispatch.h> |
244 | #include <ATen/ops/_is_any_true_native.h> |
245 | #include <ATen/ops/_linalg_check_errors_compositeexplicitautograd_dispatch.h> |
246 | #include <ATen/ops/_linalg_check_errors_native.h> |
247 | #include <ATen/ops/_lstm_mps_compositeexplicitautograd_dispatch.h> |
248 | #include <ATen/ops/_lstm_mps_native.h> |
249 | #include <ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h> |
250 | #include <ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h> |
251 | #include <ATen/ops/_make_dual_copy_native.h> |
252 | #include <ATen/ops/_make_dual_native.h> |
253 | #include <ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h> |
254 | #include <ATen/ops/_make_per_channel_quantized_tensor_native.h> |
255 | #include <ATen/ops/_make_per_tensor_quantized_tensor_compositeexplicitautograd_dispatch.h> |
256 | #include <ATen/ops/_make_per_tensor_quantized_tensor_native.h> |
257 | #include <ATen/ops/_masked_scale_compositeexplicitautograd_dispatch.h> |
258 | #include <ATen/ops/_masked_scale_native.h> |
259 | #include <ATen/ops/_masked_softmax_backward_compositeexplicitautograd_dispatch.h> |
260 | #include <ATen/ops/_masked_softmax_backward_native.h> |
261 | #include <ATen/ops/_masked_softmax_compositeexplicitautograd_dispatch.h> |
262 | #include <ATen/ops/_masked_softmax_native.h> |
263 | #include <ATen/ops/_mkldnn_reshape_compositeexplicitautograd_dispatch.h> |
264 | #include <ATen/ops/_mkldnn_reshape_native.h> |
265 | #include <ATen/ops/_mkldnn_transpose_compositeexplicitautograd_dispatch.h> |
266 | #include <ATen/ops/_mkldnn_transpose_native.h> |
267 | #include <ATen/ops/_mps_convolution_compositeexplicitautograd_dispatch.h> |
268 | #include <ATen/ops/_mps_convolution_native.h> |
269 | #include <ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h> |
270 | #include <ATen/ops/_mps_convolution_transpose_native.h> |
271 | #include <ATen/ops/_mps_max_pool2d_compositeexplicitautograd_dispatch.h> |
272 | #include <ATen/ops/_mps_max_pool2d_native.h> |
273 | #include <ATen/ops/_native_batch_norm_legit_compositeexplicitautograd_dispatch.h> |
274 | #include <ATen/ops/_native_batch_norm_legit_native.h> |
275 | #include <ATen/ops/_native_decoder_only_multi_head_attention_compositeexplicitautograd_dispatch.h> |
276 | #include <ATen/ops/_native_decoder_only_multi_head_attention_native.h> |
277 | #include <ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h> |
278 | #include <ATen/ops/_native_multi_head_attention_native.h> |
279 | #include <ATen/ops/_neg_view_compositeexplicitautograd_dispatch.h> |
280 | #include <ATen/ops/_neg_view_copy_compositeexplicitautograd_dispatch.h> |
281 | #include <ATen/ops/_neg_view_copy_native.h> |
282 | #include <ATen/ops/_neg_view_native.h> |
283 | #include <ATen/ops/_nested_from_padded_and_nested_example_compositeexplicitautograd_dispatch.h> |
284 | #include <ATen/ops/_nested_from_padded_and_nested_example_native.h> |
285 | #include <ATen/ops/_nested_from_padded_compositeexplicitautograd_dispatch.h> |
286 | #include <ATen/ops/_nested_from_padded_native.h> |
287 | #include <ATen/ops/_nested_tensor_from_mask_compositeexplicitautograd_dispatch.h> |
288 | #include <ATen/ops/_nested_tensor_from_mask_native.h> |
289 | #include <ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h> |
290 | #include <ATen/ops/_nested_tensor_from_tensor_list_native.h> |
291 | #include <ATen/ops/_nested_tensor_size_compositeexplicitautograd_dispatch.h> |
292 | #include <ATen/ops/_nested_tensor_size_native.h> |
293 | #include <ATen/ops/_nested_tensor_strides_compositeexplicitautograd_dispatch.h> |
294 | #include <ATen/ops/_nested_tensor_strides_native.h> |
295 | #include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h> |
296 | #include <ATen/ops/_nested_view_from_buffer_copy_native.h> |
297 | #include <ATen/ops/_new_zeros_with_same_feature_meta_compositeexplicitautograd_dispatch.h> |
298 | #include <ATen/ops/_new_zeros_with_same_feature_meta_native.h> |
299 | #include <ATen/ops/_nnpack_spatial_convolution_compositeexplicitautograd_dispatch.h> |
300 | #include <ATen/ops/_nnpack_spatial_convolution_native.h> |
301 | #include <ATen/ops/_pack_padded_sequence_compositeexplicitautograd_dispatch.h> |
302 | #include <ATen/ops/_pack_padded_sequence_native.h> |
303 | #include <ATen/ops/_pdist_backward_compositeexplicitautograd_dispatch.h> |
304 | #include <ATen/ops/_pdist_backward_native.h> |
305 | #include <ATen/ops/_pdist_forward_compositeexplicitautograd_dispatch.h> |
306 | #include <ATen/ops/_pdist_forward_native.h> |
307 | #include <ATen/ops/_pin_memory_compositeexplicitautograd_dispatch.h> |
308 | #include <ATen/ops/_pin_memory_native.h> |
309 | #include <ATen/ops/_reshape_alias_copy_compositeexplicitautograd_dispatch.h> |
310 | #include <ATen/ops/_reshape_alias_copy_native.h> |
311 | #include <ATen/ops/_reshape_copy_compositeexplicitautograd_dispatch.h> |
312 | #include <ATen/ops/_reshape_copy_native.h> |
313 | #include <ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h> |
314 | #include <ATen/ops/_resize_output_native.h> |
315 | #include <ATen/ops/_sample_dirichlet_compositeexplicitautograd_dispatch.h> |
316 | #include <ATen/ops/_sample_dirichlet_native.h> |
317 | #include <ATen/ops/_segment_reduce_backward_compositeexplicitautograd_dispatch.h> |
318 | #include <ATen/ops/_segment_reduce_backward_native.h> |
319 | #include <ATen/ops/_slow_conv2d_backward_compositeexplicitautograd_dispatch.h> |
320 | #include <ATen/ops/_slow_conv2d_backward_native.h> |
321 | #include <ATen/ops/_sparse_addmm_compositeexplicitautograd_dispatch.h> |
322 | #include <ATen/ops/_sparse_addmm_native.h> |
323 | #include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautograd_dispatch.h> |
324 | #include <ATen/ops/_sparse_broadcast_to_copy_native.h> |
325 | #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h> |
326 | #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h> |
327 | #include <ATen/ops/_sparse_coo_tensor_with_dims_compositeexplicitautograd_dispatch.h> |
328 | #include <ATen/ops/_sparse_coo_tensor_with_dims_native.h> |
329 | #include <ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h> |
330 | #include <ATen/ops/_sparse_csr_prod_native.h> |
331 | #include <ATen/ops/_sparse_csr_sum_compositeexplicitautograd_dispatch.h> |
332 | #include <ATen/ops/_sparse_csr_sum_native.h> |
333 | #include <ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h> |
334 | #include <ATen/ops/_sparse_log_softmax_backward_data_native.h> |
335 | #include <ATen/ops/_sparse_log_softmax_compositeexplicitautograd_dispatch.h> |
336 | #include <ATen/ops/_sparse_log_softmax_native.h> |
337 | #include <ATen/ops/_sparse_softmax_backward_data_compositeexplicitautograd_dispatch.h> |
338 | #include <ATen/ops/_sparse_softmax_backward_data_native.h> |
339 | #include <ATen/ops/_sparse_softmax_compositeexplicitautograd_dispatch.h> |
340 | #include <ATen/ops/_sparse_softmax_native.h> |
341 | #include <ATen/ops/_sparse_sparse_matmul_compositeexplicitautograd_dispatch.h> |
342 | #include <ATen/ops/_sparse_sparse_matmul_native.h> |
343 | #include <ATen/ops/_sparse_sum_backward_compositeexplicitautograd_dispatch.h> |
344 | #include <ATen/ops/_sparse_sum_backward_native.h> |
345 | #include <ATen/ops/_sparse_sum_compositeexplicitautograd_dispatch.h> |
346 | #include <ATen/ops/_sparse_sum_native.h> |
347 | #include <ATen/ops/_spdiags_compositeexplicitautograd_dispatch.h> |
348 | #include <ATen/ops/_spdiags_native.h> |
349 | #include <ATen/ops/_stack_compositeexplicitautograd_dispatch.h> |
350 | #include <ATen/ops/_stack_native.h> |
351 | #include <ATen/ops/_standard_gamma_compositeexplicitautograd_dispatch.h> |
352 | #include <ATen/ops/_standard_gamma_grad_compositeexplicitautograd_dispatch.h> |
353 | #include <ATen/ops/_standard_gamma_grad_native.h> |
354 | #include <ATen/ops/_standard_gamma_native.h> |
355 | #include <ATen/ops/_test_autograd_multiple_dispatch_compositeexplicitautograd_dispatch.h> |
356 | #include <ATen/ops/_test_autograd_multiple_dispatch_native.h> |
357 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_compositeexplicitautograd_dispatch.h> |
358 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautograd_dispatch.h> |
359 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h> |
360 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_native.h> |
361 | #include <ATen/ops/_test_optional_filled_intlist_compositeexplicitautograd_dispatch.h> |
362 | #include <ATen/ops/_test_optional_filled_intlist_native.h> |
363 | #include <ATen/ops/_test_optional_floatlist_compositeexplicitautograd_dispatch.h> |
364 | #include <ATen/ops/_test_optional_floatlist_native.h> |
365 | #include <ATen/ops/_test_optional_intlist_compositeexplicitautograd_dispatch.h> |
366 | #include <ATen/ops/_test_optional_intlist_native.h> |
367 | #include <ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h> |
368 | #include <ATen/ops/_test_warn_in_autograd_native.h> |
369 | #include <ATen/ops/_thnn_fused_gru_cell_backward_compositeexplicitautograd_dispatch.h> |
370 | #include <ATen/ops/_thnn_fused_gru_cell_backward_native.h> |
371 | #include <ATen/ops/_thnn_fused_gru_cell_compositeexplicitautograd_dispatch.h> |
372 | #include <ATen/ops/_thnn_fused_gru_cell_native.h> |
373 | #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_compositeexplicitautograd_dispatch.h> |
374 | #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h> |
375 | #include <ATen/ops/_thnn_fused_lstm_cell_compositeexplicitautograd_dispatch.h> |
376 | #include <ATen/ops/_thnn_fused_lstm_cell_native.h> |
377 | #include <ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h> |
378 | #include <ATen/ops/_to_copy_native.h> |
379 | #include <ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h> |
380 | #include <ATen/ops/_to_dense_native.h> |
381 | #include <ATen/ops/_transform_bias_rescale_qkv_compositeexplicitautograd_dispatch.h> |
382 | #include <ATen/ops/_transform_bias_rescale_qkv_native.h> |
383 | #include <ATen/ops/_transformer_decoder_only_layer_fwd_compositeexplicitautograd_dispatch.h> |
384 | #include <ATen/ops/_transformer_decoder_only_layer_fwd_native.h> |
385 | #include <ATen/ops/_transformer_encoder_layer_fwd_compositeexplicitautograd_dispatch.h> |
386 | #include <ATen/ops/_transformer_encoder_layer_fwd_native.h> |
387 | #include <ATen/ops/_trilinear_compositeexplicitautograd_dispatch.h> |
388 | #include <ATen/ops/_trilinear_native.h> |
389 | #include <ATen/ops/_triton_multi_head_attention_compositeexplicitautograd_dispatch.h> |
390 | #include <ATen/ops/_triton_multi_head_attention_native.h> |
391 | #include <ATen/ops/_triton_scaled_dot_attention_compositeexplicitautograd_dispatch.h> |
392 | #include <ATen/ops/_triton_scaled_dot_attention_native.h> |
393 | #include <ATen/ops/_unique2_compositeexplicitautograd_dispatch.h> |
394 | #include <ATen/ops/_unique2_native.h> |
395 | #include <ATen/ops/_unique_compositeexplicitautograd_dispatch.h> |
396 | #include <ATen/ops/_unique_native.h> |
397 | #include <ATen/ops/_unsafe_view_compositeexplicitautograd_dispatch.h> |
398 | #include <ATen/ops/_unsafe_view_native.h> |
399 | #include <ATen/ops/_values_copy_compositeexplicitautograd_dispatch.h> |
400 | #include <ATen/ops/_values_copy_native.h> |
401 | #include <ATen/ops/_weight_norm_interface_backward_compositeexplicitautograd_dispatch.h> |
402 | #include <ATen/ops/_weight_norm_interface_backward_native.h> |
403 | #include <ATen/ops/_weight_norm_interface_compositeexplicitautograd_dispatch.h> |
404 | #include <ATen/ops/_weight_norm_interface_native.h> |
405 | #include <ATen/ops/abs_compositeexplicitautograd_dispatch.h> |
406 | #include <ATen/ops/abs_native.h> |
407 | #include <ATen/ops/add_compositeexplicitautograd_dispatch.h> |
408 | #include <ATen/ops/add_native.h> |
409 | #include <ATen/ops/addr_compositeexplicitautograd_dispatch.h> |
410 | #include <ATen/ops/addr_native.h> |
411 | #include <ATen/ops/affine_grid_generator_compositeexplicitautograd_dispatch.h> |
412 | #include <ATen/ops/affine_grid_generator_native.h> |
413 | #include <ATen/ops/alias_compositeexplicitautograd_dispatch.h> |
414 | #include <ATen/ops/alias_copy_compositeexplicitautograd_dispatch.h> |
415 | #include <ATen/ops/alias_copy_native.h> |
416 | #include <ATen/ops/alias_native.h> |
417 | #include <ATen/ops/allclose_compositeexplicitautograd_dispatch.h> |
418 | #include <ATen/ops/allclose_native.h> |
419 | #include <ATen/ops/arange_compositeexplicitautograd_dispatch.h> |
420 | #include <ATen/ops/arange_native.h> |
421 | #include <ATen/ops/argsort_compositeexplicitautograd_dispatch.h> |
422 | #include <ATen/ops/argsort_native.h> |
423 | #include <ATen/ops/as_strided_copy_compositeexplicitautograd_dispatch.h> |
424 | #include <ATen/ops/as_strided_copy_native.h> |
425 | #include <ATen/ops/as_strided_scatter_compositeexplicitautograd_dispatch.h> |
426 | #include <ATen/ops/as_strided_scatter_native.h> |
427 | #include <ATen/ops/bartlett_window_compositeexplicitautograd_dispatch.h> |
428 | #include <ATen/ops/bartlett_window_native.h> |
429 | #include <ATen/ops/batch_norm_backward_elemt_compositeexplicitautograd_dispatch.h> |
430 | #include <ATen/ops/batch_norm_backward_elemt_native.h> |
431 | #include <ATen/ops/batch_norm_backward_reduce_compositeexplicitautograd_dispatch.h> |
432 | #include <ATen/ops/batch_norm_backward_reduce_native.h> |
433 | #include <ATen/ops/batch_norm_gather_stats_compositeexplicitautograd_dispatch.h> |
434 | #include <ATen/ops/batch_norm_gather_stats_native.h> |
435 | #include <ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h> |
436 | #include <ATen/ops/batch_norm_gather_stats_with_counts_native.h> |
437 | #include <ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h> |
438 | #include <ATen/ops/batch_norm_stats_native.h> |
439 | #include <ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h> |
440 | #include <ATen/ops/batch_norm_update_stats_native.h> |
441 | #include <ATen/ops/bernoulli_compositeexplicitautograd_dispatch.h> |
442 | #include <ATen/ops/bernoulli_native.h> |
443 | #include <ATen/ops/binary_cross_entropy_with_logits_compositeexplicitautograd_dispatch.h> |
444 | #include <ATen/ops/binary_cross_entropy_with_logits_native.h> |
445 | #include <ATen/ops/bincount_compositeexplicitautograd_dispatch.h> |
446 | #include <ATen/ops/bincount_native.h> |
447 | #include <ATen/ops/binomial_compositeexplicitautograd_dispatch.h> |
448 | #include <ATen/ops/binomial_native.h> |
449 | #include <ATen/ops/bitwise_and_compositeexplicitautograd_dispatch.h> |
450 | #include <ATen/ops/bitwise_and_native.h> |
451 | #include <ATen/ops/bitwise_left_shift_compositeexplicitautograd_dispatch.h> |
452 | #include <ATen/ops/bitwise_left_shift_native.h> |
453 | #include <ATen/ops/bitwise_or_compositeexplicitautograd_dispatch.h> |
454 | #include <ATen/ops/bitwise_or_native.h> |
455 | #include <ATen/ops/bitwise_right_shift_compositeexplicitautograd_dispatch.h> |
456 | #include <ATen/ops/bitwise_right_shift_native.h> |
457 | #include <ATen/ops/bitwise_xor_compositeexplicitautograd_dispatch.h> |
458 | #include <ATen/ops/bitwise_xor_native.h> |
459 | #include <ATen/ops/blackman_window_compositeexplicitautograd_dispatch.h> |
460 | #include <ATen/ops/blackman_window_native.h> |
461 | #include <ATen/ops/block_diag_compositeexplicitautograd_dispatch.h> |
462 | #include <ATen/ops/block_diag_native.h> |
463 | #include <ATen/ops/bucketize_compositeexplicitautograd_dispatch.h> |
464 | #include <ATen/ops/bucketize_native.h> |
465 | #include <ATen/ops/cauchy_compositeexplicitautograd_dispatch.h> |
466 | #include <ATen/ops/cauchy_native.h> |
467 | #include <ATen/ops/ccol_indices_compositeexplicitautograd_dispatch.h> |
468 | #include <ATen/ops/ccol_indices_copy_compositeexplicitautograd_dispatch.h> |
469 | #include <ATen/ops/ccol_indices_copy_native.h> |
470 | #include <ATen/ops/ccol_indices_native.h> |
471 | #include <ATen/ops/celu_compositeexplicitautograd_dispatch.h> |
472 | #include <ATen/ops/celu_native.h> |
473 | #include <ATen/ops/channel_shuffle_compositeexplicitautograd_dispatch.h> |
474 | #include <ATen/ops/channel_shuffle_native.h> |
475 | #include <ATen/ops/cholesky_solve_compositeexplicitautograd_dispatch.h> |
476 | #include <ATen/ops/cholesky_solve_native.h> |
477 | #include <ATen/ops/clone_compositeexplicitautograd_dispatch.h> |
478 | #include <ATen/ops/clone_native.h> |
479 | #include <ATen/ops/col_indices_compositeexplicitautograd_dispatch.h> |
480 | #include <ATen/ops/col_indices_copy_compositeexplicitautograd_dispatch.h> |
481 | #include <ATen/ops/col_indices_copy_native.h> |
482 | #include <ATen/ops/col_indices_native.h> |
483 | #include <ATen/ops/complex_compositeexplicitautograd_dispatch.h> |
484 | #include <ATen/ops/complex_native.h> |
485 | #include <ATen/ops/conj_physical_compositeexplicitautograd_dispatch.h> |
486 | #include <ATen/ops/conj_physical_native.h> |
487 | #include <ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h> |
488 | #include <ATen/ops/constant_pad_nd_native.h> |
489 | #include <ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h> |
490 | #include <ATen/ops/conv_depthwise3d_native.h> |
491 | #include <ATen/ops/conv_tbc_compositeexplicitautograd_dispatch.h> |
492 | #include <ATen/ops/conv_tbc_native.h> |
493 | #include <ATen/ops/convolution_backward_compositeexplicitautograd_dispatch.h> |
494 | #include <ATen/ops/convolution_backward_native.h> |
495 | #include <ATen/ops/convolution_backward_overrideable_compositeexplicitautograd_dispatch.h> |
496 | #include <ATen/ops/convolution_backward_overrideable_native.h> |
497 | #include <ATen/ops/convolution_compositeexplicitautograd_dispatch.h> |
498 | #include <ATen/ops/convolution_native.h> |
499 | #include <ATen/ops/convolution_overrideable_compositeexplicitautograd_dispatch.h> |
500 | #include <ATen/ops/convolution_overrideable_native.h> |
501 | #include <ATen/ops/copy_compositeexplicitautograd_dispatch.h> |
502 | #include <ATen/ops/copy_native.h> |
503 | #include <ATen/ops/copy_sparse_to_sparse_compositeexplicitautograd_dispatch.h> |
504 | #include <ATen/ops/copy_sparse_to_sparse_native.h> |
505 | #include <ATen/ops/copysign_compositeexplicitautograd_dispatch.h> |
506 | #include <ATen/ops/copysign_native.h> |
507 | #include <ATen/ops/count_nonzero_compositeexplicitautograd_dispatch.h> |
508 | #include <ATen/ops/count_nonzero_native.h> |
509 | #include <ATen/ops/crow_indices_compositeexplicitautograd_dispatch.h> |
510 | #include <ATen/ops/crow_indices_copy_compositeexplicitautograd_dispatch.h> |
511 | #include <ATen/ops/crow_indices_copy_native.h> |
512 | #include <ATen/ops/crow_indices_native.h> |
513 | #include <ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h> |
514 | #include <ATen/ops/cudnn_affine_grid_generator_backward_native.h> |
515 | #include <ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h> |
516 | #include <ATen/ops/cudnn_affine_grid_generator_native.h> |
517 | #include <ATen/ops/cudnn_batch_norm_backward_compositeexplicitautograd_dispatch.h> |
518 | #include <ATen/ops/cudnn_batch_norm_backward_native.h> |
519 | #include <ATen/ops/cudnn_batch_norm_compositeexplicitautograd_dispatch.h> |
520 | #include <ATen/ops/cudnn_batch_norm_native.h> |
521 | #include <ATen/ops/cudnn_convolution_add_relu_compositeexplicitautograd_dispatch.h> |
522 | #include <ATen/ops/cudnn_convolution_add_relu_native.h> |
523 | #include <ATen/ops/cudnn_convolution_compositeexplicitautograd_dispatch.h> |
524 | #include <ATen/ops/cudnn_convolution_native.h> |
525 | #include <ATen/ops/cudnn_convolution_relu_compositeexplicitautograd_dispatch.h> |
526 | #include <ATen/ops/cudnn_convolution_relu_native.h> |
527 | #include <ATen/ops/cudnn_convolution_transpose_compositeexplicitautograd_dispatch.h> |
528 | #include <ATen/ops/cudnn_convolution_transpose_native.h> |
529 | #include <ATen/ops/cudnn_grid_sampler_backward_compositeexplicitautograd_dispatch.h> |
530 | #include <ATen/ops/cudnn_grid_sampler_backward_native.h> |
531 | #include <ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h> |
532 | #include <ATen/ops/cudnn_grid_sampler_native.h> |
533 | #include <ATen/ops/cummax_compositeexplicitautograd_dispatch.h> |
534 | #include <ATen/ops/cummax_native.h> |
535 | #include <ATen/ops/cummin_compositeexplicitautograd_dispatch.h> |
536 | #include <ATen/ops/cummin_native.h> |
537 | #include <ATen/ops/deg2rad_compositeexplicitautograd_dispatch.h> |
538 | #include <ATen/ops/deg2rad_native.h> |
539 | #include <ATen/ops/dequantize_compositeexplicitautograd_dispatch.h> |
540 | #include <ATen/ops/dequantize_native.h> |
541 | #include <ATen/ops/detach_compositeexplicitautograd_dispatch.h> |
542 | #include <ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h> |
543 | #include <ATen/ops/detach_copy_native.h> |
544 | #include <ATen/ops/detach_native.h> |
545 | #include <ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h> |
546 | #include <ATen/ops/diag_embed_native.h> |
547 | #include <ATen/ops/diagonal_backward_compositeexplicitautograd_dispatch.h> |
548 | #include <ATen/ops/diagonal_backward_native.h> |
549 | #include <ATen/ops/diagonal_compositeexplicitautograd_dispatch.h> |
550 | #include <ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h> |
551 | #include <ATen/ops/diagonal_copy_native.h> |
552 | #include <ATen/ops/diagonal_native.h> |
553 | #include <ATen/ops/diagonal_scatter_compositeexplicitautograd_dispatch.h> |
554 | #include <ATen/ops/diagonal_scatter_native.h> |
555 | #include <ATen/ops/dist_compositeexplicitautograd_dispatch.h> |
556 | #include <ATen/ops/dist_native.h> |
557 | #include <ATen/ops/div_compositeexplicitautograd_dispatch.h> |
558 | #include <ATen/ops/div_native.h> |
559 | #include <ATen/ops/dot_compositeexplicitautograd_dispatch.h> |
560 | #include <ATen/ops/dot_native.h> |
561 | #include <ATen/ops/embedding_compositeexplicitautograd_dispatch.h> |
562 | #include <ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h> |
563 | #include <ATen/ops/embedding_dense_backward_native.h> |
564 | #include <ATen/ops/embedding_native.h> |
565 | #include <ATen/ops/embedding_renorm_compositeexplicitautograd_dispatch.h> |
566 | #include <ATen/ops/embedding_renorm_native.h> |
567 | #include <ATen/ops/empty_compositeexplicitautograd_dispatch.h> |
568 | #include <ATen/ops/empty_like_compositeexplicitautograd_dispatch.h> |
569 | #include <ATen/ops/empty_like_native.h> |
570 | #include <ATen/ops/empty_native.h> |
571 | #include <ATen/ops/empty_quantized_compositeexplicitautograd_dispatch.h> |
572 | #include <ATen/ops/empty_quantized_native.h> |
573 | #include <ATen/ops/empty_strided_compositeexplicitautograd_dispatch.h> |
574 | #include <ATen/ops/empty_strided_native.h> |
575 | #include <ATen/ops/expand_compositeexplicitautograd_dispatch.h> |
576 | #include <ATen/ops/expand_copy_compositeexplicitautograd_dispatch.h> |
577 | #include <ATen/ops/expand_copy_native.h> |
578 | #include <ATen/ops/expand_native.h> |
579 | #include <ATen/ops/exponential_compositeexplicitautograd_dispatch.h> |
580 | #include <ATen/ops/exponential_native.h> |
581 | #include <ATen/ops/eye_compositeexplicitautograd_dispatch.h> |
582 | #include <ATen/ops/eye_native.h> |
583 | #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_compositeexplicitautograd_dispatch.h> |
584 | #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h> |
585 | #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_compositeexplicitautograd_dispatch.h> |
586 | #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h> |
587 | #include <ATen/ops/fft_fftfreq_compositeexplicitautograd_dispatch.h> |
588 | #include <ATen/ops/fft_fftfreq_native.h> |
589 | #include <ATen/ops/fft_rfftfreq_compositeexplicitautograd_dispatch.h> |
590 | #include <ATen/ops/fft_rfftfreq_native.h> |
591 | #include <ATen/ops/fill_compositeexplicitautograd_dispatch.h> |
592 | #include <ATen/ops/fill_native.h> |
593 | #include <ATen/ops/flip_compositeexplicitautograd_dispatch.h> |
594 | #include <ATen/ops/flip_native.h> |
595 | #include <ATen/ops/fmod_compositeexplicitautograd_dispatch.h> |
596 | #include <ATen/ops/fmod_native.h> |
597 | #include <ATen/ops/frexp_compositeexplicitautograd_dispatch.h> |
598 | #include <ATen/ops/frexp_native.h> |
599 | #include <ATen/ops/from_file_compositeexplicitautograd_dispatch.h> |
600 | #include <ATen/ops/from_file_native.h> |
601 | #include <ATen/ops/full_compositeexplicitautograd_dispatch.h> |
602 | #include <ATen/ops/full_like_compositeexplicitautograd_dispatch.h> |
603 | #include <ATen/ops/full_like_native.h> |
604 | #include <ATen/ops/full_native.h> |
605 | #include <ATen/ops/geometric_compositeexplicitautograd_dispatch.h> |
606 | #include <ATen/ops/geometric_native.h> |
607 | #include <ATen/ops/glu_backward_jvp_compositeexplicitautograd_dispatch.h> |
608 | #include <ATen/ops/glu_backward_jvp_native.h> |
609 | #include <ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h> |
610 | #include <ATen/ops/glu_jvp_native.h> |
611 | #include <ATen/ops/grid_sampler_2d_backward_compositeexplicitautograd_dispatch.h> |
612 | #include <ATen/ops/grid_sampler_2d_backward_native.h> |
613 | #include <ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h> |
614 | #include <ATen/ops/grid_sampler_2d_native.h> |
615 | #include <ATen/ops/grid_sampler_3d_backward_compositeexplicitautograd_dispatch.h> |
616 | #include <ATen/ops/grid_sampler_3d_backward_native.h> |
617 | #include <ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h> |
618 | #include <ATen/ops/grid_sampler_3d_native.h> |
619 | #include <ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h> |
620 | #include <ATen/ops/hamming_window_native.h> |
621 | #include <ATen/ops/hann_window_compositeexplicitautograd_dispatch.h> |
622 | #include <ATen/ops/hann_window_native.h> |
623 | #include <ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h> |
624 | #include <ATen/ops/hardswish_backward_native.h> |
625 | #include <ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h> |
626 | #include <ATen/ops/huber_loss_backward_native.h> |
627 | #include <ATen/ops/index_fill_compositeexplicitautograd_dispatch.h> |
628 | #include <ATen/ops/index_fill_native.h> |
629 | #include <ATen/ops/index_put_compositeexplicitautograd_dispatch.h> |
630 | #include <ATen/ops/index_put_native.h> |
631 | #include <ATen/ops/indices_compositeexplicitautograd_dispatch.h> |
632 | #include <ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h> |
633 | #include <ATen/ops/indices_copy_native.h> |
634 | #include <ATen/ops/indices_native.h> |
635 | #include <ATen/ops/int_repr_compositeexplicitautograd_dispatch.h> |
636 | #include <ATen/ops/int_repr_native.h> |
637 | #include <ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h> |
638 | #include <ATen/ops/is_coalesced_native.h> |
639 | #include <ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h> |
640 | #include <ATen/ops/is_pinned_native.h> |
641 | #include <ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h> |
642 | #include <ATen/ops/is_same_size_native.h> |
643 | #include <ATen/ops/isinf_compositeexplicitautograd_dispatch.h> |
644 | #include <ATen/ops/isinf_native.h> |
645 | #include <ATen/ops/isnan_compositeexplicitautograd_dispatch.h> |
646 | #include <ATen/ops/isnan_native.h> |
647 | #include <ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h> |
648 | #include <ATen/ops/kaiser_window_native.h> |
649 | #include <ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h> |
650 | #include <ATen/ops/kthvalue_native.h> |
651 | #include <ATen/ops/lift_compositeexplicitautograd_dispatch.h> |
652 | #include <ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h> |
653 | #include <ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h> |
654 | #include <ATen/ops/lift_fresh_copy_native.h> |
655 | #include <ATen/ops/lift_fresh_native.h> |
656 | #include <ATen/ops/lift_native.h> |
657 | #include <ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h> |
658 | #include <ATen/ops/linalg_lstsq_native.h> |
659 | #include <ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h> |
660 | #include <ATen/ops/linalg_matrix_exp_native.h> |
661 | #include <ATen/ops/linalg_pinv_compositeexplicitautograd_dispatch.h> |
662 | #include <ATen/ops/linalg_pinv_native.h> |
663 | #include <ATen/ops/linear_backward_compositeexplicitautograd_dispatch.h> |
664 | #include <ATen/ops/linear_backward_native.h> |
665 | #include <ATen/ops/linear_compositeexplicitautograd_dispatch.h> |
666 | #include <ATen/ops/linear_native.h> |
667 | #include <ATen/ops/linspace_compositeexplicitautograd_dispatch.h> |
668 | #include <ATen/ops/linspace_native.h> |
669 | #include <ATen/ops/log_normal_compositeexplicitautograd_dispatch.h> |
670 | #include <ATen/ops/log_normal_native.h> |
671 | #include <ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h> |
672 | #include <ATen/ops/log_softmax_native.h> |
673 | #include <ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h> |
674 | #include <ATen/ops/logcumsumexp_native.h> |
675 | #include <ATen/ops/logical_and_compositeexplicitautograd_dispatch.h> |
676 | #include <ATen/ops/logical_and_native.h> |
677 | #include <ATen/ops/logical_not_compositeexplicitautograd_dispatch.h> |
678 | #include <ATen/ops/logical_not_native.h> |
679 | #include <ATen/ops/logical_or_compositeexplicitautograd_dispatch.h> |
680 | #include <ATen/ops/logical_or_native.h> |
681 | #include <ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h> |
682 | #include <ATen/ops/logical_xor_native.h> |
683 | #include <ATen/ops/logspace_compositeexplicitautograd_dispatch.h> |
684 | #include <ATen/ops/logspace_native.h> |
685 | #include <ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h> |
686 | #include <ATen/ops/logsumexp_native.h> |
687 | #include <ATen/ops/lshift_compositeexplicitautograd_dispatch.h> |
688 | #include <ATen/ops/lshift_native.h> |
689 | #include <ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h> |
690 | #include <ATen/ops/lstm_mps_backward_native.h> |
691 | #include <ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h> |
692 | #include <ATen/ops/masked_fill_native.h> |
693 | #include <ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h> |
694 | #include <ATen/ops/masked_scatter_native.h> |
695 | #include <ATen/ops/matmul_backward_compositeexplicitautograd_dispatch.h> |
696 | #include <ATen/ops/matmul_backward_native.h> |
697 | #include <ATen/ops/mean_compositeexplicitautograd_dispatch.h> |
698 | #include <ATen/ops/mean_native.h> |
699 | #include <ATen/ops/median_compositeexplicitautograd_dispatch.h> |
700 | #include <ATen/ops/median_native.h> |
701 | #include <ATen/ops/miopen_batch_norm_backward_compositeexplicitautograd_dispatch.h> |
702 | #include <ATen/ops/miopen_batch_norm_backward_native.h> |
703 | #include <ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h> |
704 | #include <ATen/ops/miopen_batch_norm_native.h> |
705 | #include <ATen/ops/miopen_convolution_compositeexplicitautograd_dispatch.h> |
706 | #include <ATen/ops/miopen_convolution_native.h> |
707 | #include <ATen/ops/miopen_convolution_transpose_compositeexplicitautograd_dispatch.h> |
708 | #include <ATen/ops/miopen_convolution_transpose_native.h> |
709 | #include <ATen/ops/miopen_depthwise_convolution_compositeexplicitautograd_dispatch.h> |
710 | #include <ATen/ops/miopen_depthwise_convolution_native.h> |
711 | #include <ATen/ops/miopen_rnn_backward_compositeexplicitautograd_dispatch.h> |
712 | #include <ATen/ops/miopen_rnn_backward_native.h> |
713 | #include <ATen/ops/miopen_rnn_compositeexplicitautograd_dispatch.h> |
714 | #include <ATen/ops/miopen_rnn_native.h> |
715 | #include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h> |
716 | #include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_native.h> |
717 | #include <ATen/ops/mkldnn_convolution_compositeexplicitautograd_dispatch.h> |
718 | #include <ATen/ops/mkldnn_convolution_native.h> |
719 | #include <ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h> |
720 | #include <ATen/ops/mkldnn_linear_backward_input_compositeexplicitautograd_dispatch.h> |
721 | #include <ATen/ops/mkldnn_linear_backward_input_native.h> |
722 | #include <ATen/ops/mkldnn_linear_backward_native.h> |
723 | #include <ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h> |
724 | #include <ATen/ops/mkldnn_linear_backward_weights_native.h> |
725 | #include <ATen/ops/mkldnn_linear_compositeexplicitautograd_dispatch.h> |
726 | #include <ATen/ops/mkldnn_linear_native.h> |
727 | #include <ATen/ops/mkldnn_max_pool2d_backward_compositeexplicitautograd_dispatch.h> |
728 | #include <ATen/ops/mkldnn_max_pool2d_backward_native.h> |
729 | #include <ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h> |
730 | #include <ATen/ops/mkldnn_max_pool2d_native.h> |
731 | #include <ATen/ops/mkldnn_max_pool3d_backward_compositeexplicitautograd_dispatch.h> |
732 | #include <ATen/ops/mkldnn_max_pool3d_backward_native.h> |
733 | #include <ATen/ops/mkldnn_max_pool3d_compositeexplicitautograd_dispatch.h> |
734 | #include <ATen/ops/mkldnn_max_pool3d_native.h> |
735 | #include <ATen/ops/mkldnn_reorder_conv2d_weight_compositeexplicitautograd_dispatch.h> |
736 | #include <ATen/ops/mkldnn_reorder_conv2d_weight_native.h> |
737 | #include <ATen/ops/mkldnn_reorder_conv3d_weight_compositeexplicitautograd_dispatch.h> |
738 | #include <ATen/ops/mkldnn_reorder_conv3d_weight_native.h> |
739 | #include <ATen/ops/mkldnn_rnn_layer_backward_compositeexplicitautograd_dispatch.h> |
740 | #include <ATen/ops/mkldnn_rnn_layer_backward_native.h> |
741 | #include <ATen/ops/mkldnn_rnn_layer_compositeexplicitautograd_dispatch.h> |
742 | #include <ATen/ops/mkldnn_rnn_layer_native.h> |
743 | #include <ATen/ops/mode_compositeexplicitautograd_dispatch.h> |
744 | #include <ATen/ops/mode_native.h> |
745 | #include <ATen/ops/mps_convolution_backward_compositeexplicitautograd_dispatch.h> |
746 | #include <ATen/ops/mps_convolution_backward_native.h> |
747 | #include <ATen/ops/mps_convolution_transpose_backward_compositeexplicitautograd_dispatch.h> |
748 | #include <ATen/ops/mps_convolution_transpose_backward_native.h> |
749 | #include <ATen/ops/mps_max_pool2d_backward_compositeexplicitautograd_dispatch.h> |
750 | #include <ATen/ops/mps_max_pool2d_backward_native.h> |
751 | #include <ATen/ops/mul_compositeexplicitautograd_dispatch.h> |
752 | #include <ATen/ops/mul_native.h> |
753 | #include <ATen/ops/mv_compositeexplicitautograd_dispatch.h> |
754 | #include <ATen/ops/mv_native.h> |
755 | #include <ATen/ops/mvlgamma_compositeexplicitautograd_dispatch.h> |
756 | #include <ATen/ops/mvlgamma_native.h> |
757 | #include <ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h> |
758 | #include <ATen/ops/nan_to_num_native.h> |
759 | #include <ATen/ops/nanmedian_compositeexplicitautograd_dispatch.h> |
760 | #include <ATen/ops/nanmedian_native.h> |
761 | #include <ATen/ops/native_batch_norm_backward_compositeexplicitautograd_dispatch.h> |
762 | #include <ATen/ops/native_batch_norm_backward_native.h> |
763 | #include <ATen/ops/native_dropout_backward_compositeexplicitautograd_dispatch.h> |
764 | #include <ATen/ops/native_dropout_backward_native.h> |
765 | #include <ATen/ops/native_dropout_compositeexplicitautograd_dispatch.h> |
766 | #include <ATen/ops/native_dropout_native.h> |
767 | #include <ATen/ops/native_group_norm_backward_compositeexplicitautograd_dispatch.h> |
768 | #include <ATen/ops/native_group_norm_backward_native.h> |
769 | #include <ATen/ops/native_group_norm_compositeexplicitautograd_dispatch.h> |
770 | #include <ATen/ops/native_group_norm_native.h> |
771 | #include <ATen/ops/native_layer_norm_backward_compositeexplicitautograd_dispatch.h> |
772 | #include <ATen/ops/native_layer_norm_backward_native.h> |
773 | #include <ATen/ops/native_layer_norm_compositeexplicitautograd_dispatch.h> |
774 | #include <ATen/ops/native_layer_norm_native.h> |
775 | #include <ATen/ops/native_norm_compositeexplicitautograd_dispatch.h> |
776 | #include <ATen/ops/native_norm_native.h> |
777 | #include <ATen/ops/new_empty_compositeexplicitautograd_dispatch.h> |
778 | #include <ATen/ops/new_empty_native.h> |
779 | #include <ATen/ops/new_empty_strided_compositeexplicitautograd_dispatch.h> |
780 | #include <ATen/ops/new_empty_strided_native.h> |
781 | #include <ATen/ops/new_full_compositeexplicitautograd_dispatch.h> |
782 | #include <ATen/ops/new_full_native.h> |
783 | #include <ATen/ops/new_ones_compositeexplicitautograd_dispatch.h> |
784 | #include <ATen/ops/new_ones_native.h> |
785 | #include <ATen/ops/new_zeros_compositeexplicitautograd_dispatch.h> |
786 | #include <ATen/ops/new_zeros_native.h> |
787 | #include <ATen/ops/norm_compositeexplicitautograd_dispatch.h> |
788 | #include <ATen/ops/norm_native.h> |
789 | #include <ATen/ops/normal_compositeexplicitautograd_dispatch.h> |
790 | #include <ATen/ops/normal_native.h> |
791 | #include <ATen/ops/ones_compositeexplicitautograd_dispatch.h> |
792 | #include <ATen/ops/ones_like_compositeexplicitautograd_dispatch.h> |
793 | #include <ATen/ops/ones_like_native.h> |
794 | #include <ATen/ops/ones_native.h> |
795 | #include <ATen/ops/permute_compositeexplicitautograd_dispatch.h> |
796 | #include <ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h> |
797 | #include <ATen/ops/permute_copy_native.h> |
798 | #include <ATen/ops/permute_native.h> |
799 | #include <ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h> |
800 | #include <ATen/ops/pixel_shuffle_native.h> |
801 | #include <ATen/ops/pixel_unshuffle_compositeexplicitautograd_dispatch.h> |
802 | #include <ATen/ops/pixel_unshuffle_native.h> |
803 | #include <ATen/ops/poisson_compositeexplicitautograd_dispatch.h> |
804 | #include <ATen/ops/poisson_native.h> |
805 | #include <ATen/ops/polar_compositeexplicitautograd_dispatch.h> |
806 | #include <ATen/ops/polar_native.h> |
807 | #include <ATen/ops/polygamma_compositeexplicitautograd_dispatch.h> |
808 | #include <ATen/ops/polygamma_native.h> |
809 | #include <ATen/ops/prod_compositeexplicitautograd_dispatch.h> |
810 | #include <ATen/ops/prod_native.h> |
811 | #include <ATen/ops/put_compositeexplicitautograd_dispatch.h> |
812 | #include <ATen/ops/put_native.h> |
813 | #include <ATen/ops/q_per_channel_scales_compositeexplicitautograd_dispatch.h> |
814 | #include <ATen/ops/q_per_channel_scales_native.h> |
815 | #include <ATen/ops/q_per_channel_zero_points_compositeexplicitautograd_dispatch.h> |
816 | #include <ATen/ops/q_per_channel_zero_points_native.h> |
817 | #include <ATen/ops/quantize_per_channel_compositeexplicitautograd_dispatch.h> |
818 | #include <ATen/ops/quantize_per_channel_native.h> |
819 | #include <ATen/ops/quantize_per_tensor_compositeexplicitautograd_dispatch.h> |
820 | #include <ATen/ops/quantize_per_tensor_dynamic_compositeexplicitautograd_dispatch.h> |
821 | #include <ATen/ops/quantize_per_tensor_dynamic_native.h> |
822 | #include <ATen/ops/quantize_per_tensor_native.h> |
823 | #include <ATen/ops/quantized_batch_norm_compositeexplicitautograd_dispatch.h> |
824 | #include <ATen/ops/quantized_batch_norm_native.h> |
825 | #include <ATen/ops/quantized_max_pool1d_compositeexplicitautograd_dispatch.h> |
826 | #include <ATen/ops/quantized_max_pool1d_native.h> |
827 | #include <ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h> |
828 | #include <ATen/ops/quantized_max_pool2d_native.h> |
829 | #include <ATen/ops/rad2deg_compositeexplicitautograd_dispatch.h> |
830 | #include <ATen/ops/rad2deg_native.h> |
831 | #include <ATen/ops/rand_compositeexplicitautograd_dispatch.h> |
832 | #include <ATen/ops/rand_like_compositeexplicitautograd_dispatch.h> |
833 | #include <ATen/ops/rand_like_native.h> |
834 | #include <ATen/ops/rand_native.h> |
835 | #include <ATen/ops/randint_compositeexplicitautograd_dispatch.h> |
836 | #include <ATen/ops/randint_like_compositeexplicitautograd_dispatch.h> |
837 | #include <ATen/ops/randint_like_native.h> |
838 | #include <ATen/ops/randint_native.h> |
839 | #include <ATen/ops/randn_compositeexplicitautograd_dispatch.h> |
840 | #include <ATen/ops/randn_like_compositeexplicitautograd_dispatch.h> |
841 | #include <ATen/ops/randn_like_native.h> |
842 | #include <ATen/ops/randn_native.h> |
843 | #include <ATen/ops/random_compositeexplicitautograd_dispatch.h> |
844 | #include <ATen/ops/random_native.h> |
845 | #include <ATen/ops/randperm_compositeexplicitautograd_dispatch.h> |
846 | #include <ATen/ops/randperm_native.h> |
847 | #include <ATen/ops/range_compositeexplicitautograd_dispatch.h> |
848 | #include <ATen/ops/range_native.h> |
849 | #include <ATen/ops/relu_compositeexplicitautograd_dispatch.h> |
850 | #include <ATen/ops/relu_native.h> |
851 | #include <ATen/ops/remainder_compositeexplicitautograd_dispatch.h> |
852 | #include <ATen/ops/remainder_native.h> |
853 | #include <ATen/ops/repeat_compositeexplicitautograd_dispatch.h> |
854 | #include <ATen/ops/repeat_interleave_compositeexplicitautograd_dispatch.h> |
855 | #include <ATen/ops/repeat_interleave_native.h> |
856 | #include <ATen/ops/repeat_native.h> |
857 | #include <ATen/ops/resize_as_compositeexplicitautograd_dispatch.h> |
858 | #include <ATen/ops/resize_as_native.h> |
859 | #include <ATen/ops/resize_as_sparse_compositeexplicitautograd_dispatch.h> |
860 | #include <ATen/ops/resize_as_sparse_native.h> |
861 | #include <ATen/ops/resize_compositeexplicitautograd_dispatch.h> |
862 | #include <ATen/ops/resize_native.h> |
863 | #include <ATen/ops/roll_compositeexplicitautograd_dispatch.h> |
864 | #include <ATen/ops/roll_native.h> |
865 | #include <ATen/ops/rot90_compositeexplicitautograd_dispatch.h> |
866 | #include <ATen/ops/rot90_native.h> |
867 | #include <ATen/ops/row_indices_compositeexplicitautograd_dispatch.h> |
868 | #include <ATen/ops/row_indices_copy_compositeexplicitautograd_dispatch.h> |
869 | #include <ATen/ops/row_indices_copy_native.h> |
870 | #include <ATen/ops/row_indices_native.h> |
871 | #include <ATen/ops/rrelu_with_noise_backward_compositeexplicitautograd_dispatch.h> |
872 | #include <ATen/ops/rrelu_with_noise_backward_native.h> |
873 | #include <ATen/ops/rshift_compositeexplicitautograd_dispatch.h> |
874 | #include <ATen/ops/rshift_native.h> |
875 | #include <ATen/ops/rsub_compositeexplicitautograd_dispatch.h> |
876 | #include <ATen/ops/rsub_native.h> |
877 | #include <ATen/ops/scalar_tensor_compositeexplicitautograd_dispatch.h> |
878 | #include <ATen/ops/scalar_tensor_native.h> |
879 | #include <ATen/ops/searchsorted_compositeexplicitautograd_dispatch.h> |
880 | #include <ATen/ops/searchsorted_native.h> |
881 | #include <ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h> |
882 | #include <ATen/ops/segment_reduce_native.h> |
883 | #include <ATen/ops/select_backward_compositeexplicitautograd_dispatch.h> |
884 | #include <ATen/ops/select_backward_native.h> |
885 | #include <ATen/ops/select_compositeexplicitautograd_dispatch.h> |
886 | #include <ATen/ops/select_copy_compositeexplicitautograd_dispatch.h> |
887 | #include <ATen/ops/select_copy_native.h> |
888 | #include <ATen/ops/select_native.h> |
889 | #include <ATen/ops/select_scatter_compositeexplicitautograd_dispatch.h> |
890 | #include <ATen/ops/select_scatter_native.h> |
891 | #include <ATen/ops/set_compositeexplicitautograd_dispatch.h> |
892 | #include <ATen/ops/set_native.h> |
893 | #include <ATen/ops/slice_backward_compositeexplicitautograd_dispatch.h> |
894 | #include <ATen/ops/slice_backward_native.h> |
895 | #include <ATen/ops/slice_compositeexplicitautograd_dispatch.h> |
896 | #include <ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h> |
897 | #include <ATen/ops/slice_copy_native.h> |
898 | #include <ATen/ops/slice_native.h> |
899 | #include <ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h> |
900 | #include <ATen/ops/slice_scatter_native.h> |
901 | #include <ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h> |
902 | #include <ATen/ops/slow_conv_dilated2d_native.h> |
903 | #include <ATen/ops/slow_conv_dilated3d_compositeexplicitautograd_dispatch.h> |
904 | #include <ATen/ops/slow_conv_dilated3d_native.h> |
905 | #include <ATen/ops/smooth_l1_loss_backward_compositeexplicitautograd_dispatch.h> |
906 | #include <ATen/ops/smooth_l1_loss_backward_native.h> |
907 | #include <ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h> |
908 | #include <ATen/ops/soft_margin_loss_backward_native.h> |
909 | #include <ATen/ops/soft_margin_loss_compositeexplicitautograd_dispatch.h> |
910 | #include <ATen/ops/soft_margin_loss_native.h> |
911 | #include <ATen/ops/softmax_compositeexplicitautograd_dispatch.h> |
912 | #include <ATen/ops/softmax_native.h> |
913 | #include <ATen/ops/sort_compositeexplicitautograd_dispatch.h> |
914 | #include <ATen/ops/sort_native.h> |
915 | #include <ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h> |
916 | #include <ATen/ops/sparse_coo_tensor_native.h> |
917 | #include <ATen/ops/sparse_mask_compositeexplicitautograd_dispatch.h> |
918 | #include <ATen/ops/sparse_mask_native.h> |
919 | #include <ATen/ops/sparse_resize_and_clear_compositeexplicitautograd_dispatch.h> |
920 | #include <ATen/ops/sparse_resize_and_clear_native.h> |
921 | #include <ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h> |
922 | #include <ATen/ops/sparse_resize_native.h> |
923 | #include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h> |
924 | #include <ATen/ops/special_chebyshev_polynomial_t_native.h> |
925 | #include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h> |
926 | #include <ATen/ops/special_chebyshev_polynomial_u_native.h> |
927 | #include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h> |
928 | #include <ATen/ops/special_chebyshev_polynomial_v_native.h> |
929 | #include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h> |
930 | #include <ATen/ops/special_chebyshev_polynomial_w_native.h> |
931 | #include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h> |
932 | #include <ATen/ops/special_hermite_polynomial_h_native.h> |
933 | #include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautograd_dispatch.h> |
934 | #include <ATen/ops/special_hermite_polynomial_he_native.h> |
935 | #include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautograd_dispatch.h> |
936 | #include <ATen/ops/special_laguerre_polynomial_l_native.h> |
937 | #include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h> |
938 | #include <ATen/ops/special_legendre_polynomial_p_native.h> |
939 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h> |
940 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h> |
941 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h> |
942 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h> |
943 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h> |
944 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h> |
945 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h> |
946 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h> |
947 | #include <ATen/ops/special_xlog1py_compositeexplicitautograd_dispatch.h> |
948 | #include <ATen/ops/special_xlog1py_native.h> |
949 | #include <ATen/ops/special_zeta_compositeexplicitautograd_dispatch.h> |
950 | #include <ATen/ops/special_zeta_native.h> |
951 | #include <ATen/ops/split_compositeexplicitautograd_dispatch.h> |
952 | #include <ATen/ops/split_copy_compositeexplicitautograd_dispatch.h> |
953 | #include <ATen/ops/split_copy_native.h> |
954 | #include <ATen/ops/split_native.h> |
955 | #include <ATen/ops/split_with_sizes_compositeexplicitautograd_dispatch.h> |
956 | #include <ATen/ops/split_with_sizes_copy_compositeexplicitautograd_dispatch.h> |
957 | #include <ATen/ops/split_with_sizes_copy_native.h> |
958 | #include <ATen/ops/split_with_sizes_native.h> |
959 | #include <ATen/ops/squeeze_compositeexplicitautograd_dispatch.h> |
960 | #include <ATen/ops/squeeze_copy_compositeexplicitautograd_dispatch.h> |
961 | #include <ATen/ops/squeeze_copy_native.h> |
962 | #include <ATen/ops/squeeze_native.h> |
963 | #include <ATen/ops/stack_compositeexplicitautograd_dispatch.h> |
964 | #include <ATen/ops/stack_native.h> |
965 | #include <ATen/ops/std_mean_compositeexplicitautograd_dispatch.h> |
966 | #include <ATen/ops/std_mean_native.h> |
967 | #include <ATen/ops/sub_compositeexplicitautograd_dispatch.h> |
968 | #include <ATen/ops/sub_native.h> |
969 | #include <ATen/ops/sum_compositeexplicitautograd_dispatch.h> |
970 | #include <ATen/ops/sum_native.h> |
971 | #include <ATen/ops/t_compositeexplicitautograd_dispatch.h> |
972 | #include <ATen/ops/t_copy_compositeexplicitautograd_dispatch.h> |
973 | #include <ATen/ops/t_copy_native.h> |
974 | #include <ATen/ops/t_native.h> |
975 | #include <ATen/ops/to_mkldnn_compositeexplicitautograd_dispatch.h> |
976 | #include <ATen/ops/to_mkldnn_native.h> |
977 | #include <ATen/ops/to_padded_tensor_compositeexplicitautograd_dispatch.h> |
978 | #include <ATen/ops/to_padded_tensor_native.h> |
979 | #include <ATen/ops/to_sparse_bsc_compositeexplicitautograd_dispatch.h> |
980 | #include <ATen/ops/to_sparse_bsc_native.h> |
981 | #include <ATen/ops/to_sparse_bsr_compositeexplicitautograd_dispatch.h> |
982 | #include <ATen/ops/to_sparse_bsr_native.h> |
983 | #include <ATen/ops/to_sparse_compositeexplicitautograd_dispatch.h> |
984 | #include <ATen/ops/to_sparse_csc_compositeexplicitautograd_dispatch.h> |
985 | #include <ATen/ops/to_sparse_csc_native.h> |
986 | #include <ATen/ops/to_sparse_csr_compositeexplicitautograd_dispatch.h> |
987 | #include <ATen/ops/to_sparse_csr_native.h> |
988 | #include <ATen/ops/to_sparse_native.h> |
989 | #include <ATen/ops/trace_compositeexplicitautograd_dispatch.h> |
990 | #include <ATen/ops/trace_native.h> |
991 | #include <ATen/ops/transpose_compositeexplicitautograd_dispatch.h> |
992 | #include <ATen/ops/transpose_copy_compositeexplicitautograd_dispatch.h> |
993 | #include <ATen/ops/transpose_copy_native.h> |
994 | #include <ATen/ops/transpose_native.h> |
995 | #include <ATen/ops/tril_indices_compositeexplicitautograd_dispatch.h> |
996 | #include <ATen/ops/tril_indices_native.h> |
997 | #include <ATen/ops/triu_indices_compositeexplicitautograd_dispatch.h> |
998 | #include <ATen/ops/triu_indices_native.h> |
999 | #include <ATen/ops/unbind_compositeexplicitautograd_dispatch.h> |
1000 | #include <ATen/ops/unbind_copy_compositeexplicitautograd_dispatch.h> |
1001 | #include <ATen/ops/unbind_copy_native.h> |
1002 | #include <ATen/ops/unbind_native.h> |
1003 | #include <ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h> |
1004 | #include <ATen/ops/unfold_backward_native.h> |
1005 | #include <ATen/ops/unfold_copy_compositeexplicitautograd_dispatch.h> |
1006 | #include <ATen/ops/unfold_copy_native.h> |
1007 | #include <ATen/ops/uniform_compositeexplicitautograd_dispatch.h> |
1008 | #include <ATen/ops/uniform_native.h> |
1009 | #include <ATen/ops/unique_consecutive_compositeexplicitautograd_dispatch.h> |
1010 | #include <ATen/ops/unique_consecutive_native.h> |
1011 | #include <ATen/ops/unique_dim_compositeexplicitautograd_dispatch.h> |
1012 | #include <ATen/ops/unique_dim_consecutive_compositeexplicitautograd_dispatch.h> |
1013 | #include <ATen/ops/unique_dim_consecutive_native.h> |
1014 | #include <ATen/ops/unique_dim_native.h> |
1015 | #include <ATen/ops/unsafe_split_compositeexplicitautograd_dispatch.h> |
1016 | #include <ATen/ops/unsafe_split_native.h> |
1017 | #include <ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h> |
1018 | #include <ATen/ops/unsafe_split_with_sizes_native.h> |
1019 | #include <ATen/ops/unsqueeze_compositeexplicitautograd_dispatch.h> |
1020 | #include <ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h> |
1021 | #include <ATen/ops/unsqueeze_copy_native.h> |
1022 | #include <ATen/ops/unsqueeze_native.h> |
1023 | #include <ATen/ops/values_compositeexplicitautograd_dispatch.h> |
1024 | #include <ATen/ops/values_copy_compositeexplicitautograd_dispatch.h> |
1025 | #include <ATen/ops/values_copy_native.h> |
1026 | #include <ATen/ops/values_native.h> |
1027 | #include <ATen/ops/var_mean_compositeexplicitautograd_dispatch.h> |
1028 | #include <ATen/ops/var_mean_native.h> |
1029 | #include <ATen/ops/vdot_compositeexplicitautograd_dispatch.h> |
1030 | #include <ATen/ops/vdot_native.h> |
1031 | #include <ATen/ops/view_as_complex_copy_compositeexplicitautograd_dispatch.h> |
1032 | #include <ATen/ops/view_as_complex_copy_native.h> |
1033 | #include <ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h> |
1034 | #include <ATen/ops/view_as_real_copy_native.h> |
1035 | #include <ATen/ops/view_compositeexplicitautograd_dispatch.h> |
1036 | #include <ATen/ops/view_copy_compositeexplicitautograd_dispatch.h> |
1037 | #include <ATen/ops/view_copy_native.h> |
1038 | #include <ATen/ops/view_native.h> |
1039 | #include <ATen/ops/xlogy_compositeexplicitautograd_dispatch.h> |
1040 | #include <ATen/ops/xlogy_native.h> |
1041 | #include <ATen/ops/zero_compositeexplicitautograd_dispatch.h> |
1042 | #include <ATen/ops/zero_native.h> |
1043 | #include <ATen/ops/zeros_compositeexplicitautograd_dispatch.h> |
1044 | #include <ATen/ops/zeros_like_compositeexplicitautograd_dispatch.h> |
1045 | #include <ATen/ops/zeros_like_native.h> |
1046 | #include <ATen/ops/zeros_native.h> |
1047 | |
1048 | // See template file RegisterDispatchDefinitions.ini |
1049 | namespace at { |
1050 | // NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid |
1051 | // ambiguity with conflicting identifiers that may have been defined in |
1052 | // at namespace already. |
1053 | namespace { |
// Resizes an `out=` tensor to `sizes` before a kernel writes into it.
// Validates that the caller-supplied `out` already has the dtype/device the
// meta function computed (`options`); out= ops never change dtype/device.
// If at::native::resize_output actually reallocated/resized `out`, the
// (advisory) `strides` from the meta function — or, failing that, the
// requested memory format — are applied.  If no resize happened, `out`'s
// preexisting strides are deliberately left untouched.
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype " , options.dtype(), ", but got " , out.dtype(), " instead" );
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device " , options.device(), ", but got " , out.device(), " instead" );
  // resize_output reports (via its return value) whether a resize occurred.
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      // Explicit strides and a memory-format request are mutually exclusive.
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
// Validates that an in-place (`op_`) call is legal: the computed output
// dtype/device/sizes (from the meta function) must match `self` exactly,
// since an in-place op cannot retype, move, or resize its input.
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: " ,
      "input tensor dtype " , self.dtype(), " and output tensor dtype " , options.dtype(), " should match" );
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: " ,
      "input tensor device " , self.device(), " and output tensor device " , options.device(), " should match" );
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: " ,
      "input tensor size " , self.sizes(), " and output tensor size " , sizes, " should match" );
}
// ---- Generated forwarding stubs (autograd/cudnn/dropout ops) ----------------
// Each wrapper below is emitted by torchgen into its own anonymous namespace
// and simply forwards a CompositeExplicitAutograd dispatch entry to the
// matching at::native:: kernel.  The "No device check" / "DeviceGuard omitted"
// lines are part of the generated template output.
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___fw_primal(const at::Tensor & self, int64_t level) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fw_primal(self, level);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___make_dual(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
  // No device check
  // DeviceGuard omitted
  return at::native::_make_dual(primal, tangent, level);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___new_zeros_with_same_feature_meta(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
  // No device check
  // DeviceGuard omitted
  return at::native::_new_zeros_with_same_feature_meta(self, other, self_num_batch_dims);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__new_zeros_with_same_feature_meta_out(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_new_zeros_with_same_feature_meta_out(self, other, self_num_batch_dims, out);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeExplicitAutograd___has_same_storage_numel(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::_has_same_storage_numel(self, other);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__cudnn_ctc_loss_out(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cudnn_ctc_loss_out(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
}
} // anonymous namespace
namespace {
// Op takes c10::SymInt arguments, so it is routed to the *_symint native overload.
at::Tensor & wrapper_CompositeExplicitAutograd_out__cudnn_rnn_flatten_weight_out(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cudnn_rnn_flatten_weight_out_symint(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}
} // anonymous namespace
namespace {
// SymInt op: forwards to the *_symint native overload.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__cudnn_rnn_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cudnn_rnn_out_symint(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}
} // anonymous namespace
namespace {
// SymInt op: forwards to the *_symint native overload.
void wrapper_CompositeExplicitAutograd_out__cudnn_rnn_backward_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cudnn_rnn_backward_out_symint(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__cudnn_init_dropout_state_out(double dropout, bool train, int64_t dropout_seed, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cudnn_init_dropout_state_out(dropout, train, dropout_seed, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__fused_dropout_out(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fused_dropout_out(self, p, generator, out0, out1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__masked_scale_out(const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_masked_scale_out(self, mask, scale, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_native_dropout_out(const at::Tensor & input, double p, c10::optional<bool> train, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::native_dropout_out(input, p, train, out0, out1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_native_dropout_backward_out(const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::native_dropout_backward_out(grad_output, mask, scale, out);
}
} // anonymous namespace
// ---- Generated forwarding stubs (abs / conjugation / negation views) --------
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__abs(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd__abs_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___conj(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_conj(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___conj_physical(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_conj_physical(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__conj_physical_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_conj_physical_out(self, out);
}
} // anonymous namespace
namespace {
// Note: the public op is conj_physical_ (no leading underscore) — the wrapper
// name carries the codegen prefix, the native call does not.
at::Tensor & wrapper_CompositeExplicitAutograd__conj_physical_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___neg_view(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_neg_view(self);
}
} // anonymous namespace
// ---- Generated forwarding stubs (add / addr / affine_grid_generator) --------
// Wrapper names encode the overload (e.g. "Scalar_out") between the dispatch
// key and the op name; the native callee uses the overload-suffixed symbol.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out__add_relu_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_add_relu_Scalar_out(self, other, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::add(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_add_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::add_Scalar_out(self, other, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_add_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::add_(self, other, alpha);
}
} // anonymous namespace
namespace {
// addr has no dedicated CompositeExplicitAutograd kernel; it forwards to the
// math_addr composite fallback implementation.
at::Tensor wrapper_CompositeExplicitAutograd__addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::math_addr(self, vec1, vec2, beta, alpha);
}
} // anonymous namespace
namespace {
// out= counterpart of the math_addr composite fallback above.
at::Tensor & wrapper_CompositeExplicitAutograd_out_addr_out(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::math_addr_out(self, vec1, vec2, beta, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd__addr_(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::addr_(self, vec1, vec2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__affine_grid_generator(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::affine_grid_generator(theta, size, align_corners);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_affine_grid_generator_out(const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::affine_grid_generator_out(theta, size, align_corners, out);
}
} // anonymous namespace
// ---- Generated forwarding stubs (reduction predicates / allclose / arange) --
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___is_all_true(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_is_all_true(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___is_any_true(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_is_any_true(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeExplicitAutograd__allclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
  // No device check
  // DeviceGuard omitted
  return at::native::allclose(self, other, rtol, atol, equal_nan);
}
} // anonymous namespace
namespace {
// arange.end overload: factory op, so it takes unpacked TensorOptions fields.
at::Tensor wrapper_CompositeExplicitAutograd__arange(const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::arange(end, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_arange_out(const at::Scalar & end, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arange_out(end, out);
}
} // anonymous namespace
namespace {
// arange.start overload.
at::Tensor wrapper_CompositeExplicitAutograd_start_arange(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::arange(start, end, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// arange.start_step overload.
at::Tensor wrapper_CompositeExplicitAutograd_start_step_arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::arange(start, end, step, dtype, layout, device, pin_memory);
}
} // anonymous namespace
1347 | } // anonymous namespace |
1348 | namespace { |
1349 | at::Tensor wrapper_CompositeExplicitAutograd__bartlett_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
1350 | // No device check |
1351 | // DeviceGuard omitted |
1352 | return at::native::bartlett_window(window_length, dtype, layout, device, pin_memory); |
1353 | } |
1354 | } // anonymous namespace |
1355 | namespace { |
1356 | at::Tensor & wrapper_CompositeExplicitAutograd_out_bartlett_window_out(int64_t window_length, at::Tensor & out) { |
1357 | // No device check |
1358 | // DeviceGuard omitted |
1359 | return at::native::bartlett_window_out(window_length, out); |
1360 | } |
1361 | } // anonymous namespace |
1362 | namespace { |
1363 | at::Tensor wrapper_CompositeExplicitAutograd_periodic_bartlett_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
1364 | // No device check |
1365 | // DeviceGuard omitted |
1366 | return at::native::bartlett_window(window_length, periodic, dtype, layout, device, pin_memory); |
1367 | } |
1368 | } // anonymous namespace |
1369 | namespace { |
1370 | at::Tensor & wrapper_CompositeExplicitAutograd_periodic_out_bartlett_window_out(int64_t window_length, bool periodic, at::Tensor & out) { |
1371 | // No device check |
1372 | // DeviceGuard omitted |
1373 | return at::native::bartlett_window_periodic_out(window_length, periodic, out); |
1374 | } |
1375 | } // anonymous namespace |
1376 | namespace { |
1377 | at::Tensor & wrapper_CompositeExplicitAutograd_out_quantized_batch_norm_out(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) { |
1378 | // No device check |
1379 | // DeviceGuard omitted |
1380 | return at::native::quantized_batch_norm_out(input, weight, bias, mean, var, eps, output_scale, output_zero_point, out); |
1381 | } |
1382 | } // anonymous namespace |
1383 | namespace { |
1384 | at::Tensor wrapper_CompositeExplicitAutograd__bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator) { |
1385 | // No device check |
1386 | // DeviceGuard omitted |
1387 | return at::native::bernoulli(self, generator); |
1388 | } |
1389 | } // anonymous namespace |
1390 | namespace { |
1391 | at::Tensor wrapper_CompositeExplicitAutograd_Tensor_bernoulli(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) { |
1392 | // No device check |
1393 | // DeviceGuard omitted |
1394 | return at::native::bernoulli(self, p, generator); |
1395 | } |
1396 | } // anonymous namespace |
1397 | namespace { |
1398 | at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_out_bernoulli_out(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator, at::Tensor & out) { |
1399 | // No device check |
1400 | // DeviceGuard omitted |
1401 | return at::native::bernoulli_Tensor_out(self, p, generator, out); |
1402 | } |
1403 | } // anonymous namespace |
1404 | namespace { |
1405 | at::Tensor & wrapper_CompositeExplicitAutograd_float_out_bernoulli_out(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) { |
1406 | // No device check |
1407 | // DeviceGuard omitted |
1408 | return at::native::bernoulli_float_out(self, p, generator, out); |
1409 | } |
1410 | } // anonymous namespace |
1411 | namespace { |
1412 | at::Tensor wrapper_CompositeExplicitAutograd__binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction) { |
1413 | // No device check |
1414 | // DeviceGuard omitted |
1415 | return at::native::binary_cross_entropy_with_logits(self, target, weight, pos_weight, reduction); |
1416 | } |
1417 | } // anonymous namespace |
1418 | namespace { |
1419 | at::Tensor & wrapper_CompositeExplicitAutograd_out_binary_cross_entropy_with_logits_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) { |
1420 | // No device check |
1421 | // DeviceGuard omitted |
1422 | return at::native::binary_cross_entropy_with_logits_out(self, target, weight, pos_weight, reduction, out); |
1423 | } |
1424 | } // anonymous namespace |
1425 | namespace { |
1426 | at::Tensor & wrapper_CompositeExplicitAutograd_out_bincount_out(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) { |
1427 | // No device check |
1428 | // DeviceGuard omitted |
1429 | return at::native::bincount_out(self, weights, minlength, out); |
1430 | } |
1431 | } // anonymous namespace |
// ---- Generated forwarding stubs (copysign.Scalar / logical_* ops) -----------
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_copysign(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::copysign(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_copysign_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::copysign_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_copysign_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::copysign_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__logical_not(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_not(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd__logical_not_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_not_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__logical_xor(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_xor(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd__logical_xor_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_xor_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__logical_and(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_and(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd__logical_and_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_and_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__logical_or(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_or(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd__logical_or_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_or_(self, other);
}
} // anonymous namespace
1509 | namespace { |
1510 | at::Tensor wrapper_CompositeExplicitAutograd__blackman_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
1511 | // No device check |
1512 | // DeviceGuard omitted |
1513 | return at::native::blackman_window(window_length, dtype, layout, device, pin_memory); |
1514 | } |
1515 | } // anonymous namespace |
1516 | namespace { |
1517 | at::Tensor & wrapper_CompositeExplicitAutograd_out_blackman_window_out(int64_t window_length, at::Tensor & out) { |
1518 | // No device check |
1519 | // DeviceGuard omitted |
1520 | return at::native::blackman_window_out(window_length, out); |
1521 | } |
1522 | } // anonymous namespace |
1523 | namespace { |
1524 | at::Tensor wrapper_CompositeExplicitAutograd_periodic_blackman_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
1525 | // No device check |
1526 | // DeviceGuard omitted |
1527 | return at::native::blackman_window(window_length, periodic, dtype, layout, device, pin_memory); |
1528 | } |
1529 | } // anonymous namespace |
namespace {
// Generated registration stub: forwards aten::blackman_window.periodic_out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_periodic_out_blackman_window_out(int64_t window_length, bool periodic, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::blackman_window_periodic_out(window_length, periodic, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::block_diag to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__block_diag(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::block_diag(tensors);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::block_diag.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_block_diag_out(at::TensorList tensors, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::block_diag_out(tensors, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::complex to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__complex(const at::Tensor & real, const at::Tensor & imag) {
  // No device check
  // DeviceGuard omitted
  return at::native::complex(real, imag);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::polar to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__polar(const at::Tensor & abs, const at::Tensor & angle) {
  // No device check
  // DeviceGuard omitted
  return at::native::polar(abs, angle);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::constant_pad_nd to the at::native kernel.
// The SymInt pad list is materialized to a concrete IntArrayRef via C10_AS_INTARRAYREF_SLOW
// because the native kernel takes non-symbolic sizes.
at::Tensor wrapper_CompositeExplicitAutograd__constant_pad_nd(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::constant_pad_nd(self, C10_AS_INTARRAYREF_SLOW(pad), value);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::constant_pad_nd.out to the symint-aware
// at::native out-variant kernel (pad is passed through symbolically, no conversion).
at::Tensor & wrapper_CompositeExplicitAutograd_out_constant_pad_nd_out(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::constant_pad_nd_out_symint(self, pad, value, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::convolution to the at::native kernel.
// SymInt padding/output_padding are materialized via C10_AS_INTARRAYREF_SLOW for the
// non-symbolic native signature.
at::Tensor wrapper_CompositeExplicitAutograd__convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::convolution(input, weight, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), dilation, transposed, C10_AS_INTARRAYREF_SLOW(output_padding), groups);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::convolution.out to the symint-aware
// at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_convolution_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::convolution_out_symint(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::convolution_backward to the at::native kernel.
// The optional SymInt bias_sizes and the SymInt padding/output_padding lists are materialized
// to concrete IntArrayRefs (preserving nullopt) for the non-symbolic native signature.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::convolution_backward(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*bias_sizes)) : c10::nullopt, stride, C10_AS_INTARRAYREF_SLOW(padding), dilation, transposed, C10_AS_INTARRAYREF_SLOW(output_padding), groups, output_mask);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::convolution_backward.out to the symint-aware
// at::native out-variant kernel (all symbolic sizes passed through unchanged).
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_convolution_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::convolution_backward_out_symint(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::convolution_overrideable to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::convolution_overrideable(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::convolution_overrideable.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_convolution_overrideable_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::convolution_overrideable_out(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::convolution_backward_overrideable to the at::native kernel.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__convolution_backward_overrideable(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::convolution_backward_overrideable(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::convolution_backward_overrideable.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_convolution_backward_overrideable_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::convolution_backward_overrideable_out(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_convolution to the at::native kernel.
// SymInt padding/output_padding are materialized via C10_AS_INTARRAYREF_SLOW.
at::Tensor wrapper_CompositeExplicitAutograd___convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convolution(input, weight, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), dilation, transposed, C10_AS_INTARRAYREF_SLOW(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_convolution.out to the symint-aware
// at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__convolution_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convolution_out_symint(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::conv_tbc to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__conv_tbc(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_tbc(self, weight, bias, pad);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::conv_tbc.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_conv_tbc_out(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_tbc_out(self, weight, bias, pad, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::copy.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_copy_out(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::copy_out(self, src, non_blocking, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards in-place aten::copy_ to the at::native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::copy_(self, src, non_blocking);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_copy_from.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__copy_from_out(const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_copy_from_out(self, dst, non_blocking, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_copy_from_and_resize.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__copy_from_and_resize_out(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_copy_from_and_resize_out(self, dst, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::count_nonzero.dim_IntList_out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_dim_IntList_out_count_nonzero_out(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::count_nonzero_dim_IntList_out(self, dim, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::count_nonzero to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__count_nonzero(const at::Tensor & self, c10::optional<int64_t> dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::count_nonzero(self, dim);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::count_nonzero.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_count_nonzero_out(const at::Tensor & self, c10::optional<int64_t> dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::count_nonzero_out(self, dim, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_affine_grid_generator.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cudnn_affine_grid_generator_out(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_affine_grid_generator_out(theta, N, C, H, W, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_affine_grid_generator_backward.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cudnn_affine_grid_generator_backward_out(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_affine_grid_generator_backward_out(grad, N, C, H, W, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_batch_norm.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_cudnn_batch_norm_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_batch_norm_out(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_batch_norm_backward.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_cudnn_batch_norm_backward_out(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_batch_norm_backward_out(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_convolution.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cudnn_convolution_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_convolution_out(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_convolution_transpose.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cudnn_convolution_transpose_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_convolution_transpose_out(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_mps_convolution_transpose.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__mps_convolution_transpose_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_mps_convolution_transpose_out(self, weight, padding, output_padding, stride, dilation, groups, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::mps_convolution_transpose_backward.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_mps_convolution_transpose_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::mps_convolution_transpose_backward_out(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_convolution_relu.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cudnn_convolution_relu_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_convolution_relu_out(self, weight, bias, stride, padding, dilation, groups, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_convolution_add_relu.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cudnn_convolution_add_relu_out(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_convolution_add_relu_out(self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_grid_sampler.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cudnn_grid_sampler_out(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_grid_sampler_out(self, grid, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cudnn_grid_sampler_backward.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_cudnn_grid_sampler_backward_out(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_grid_sampler_backward_out(self, grid, grad_output, out0, out1);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cummax to the at::native kernel.
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__cummax(const at::Tensor & self, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummax(self, dim);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cummax.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_cummax_out(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummax_out(self, dim, values, indices);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cummin to the at::native kernel.
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__cummin(const at::Tensor & self, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummin(self, dim);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::cummin.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_cummin_out(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummin_out(self, dim, values, indices);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_ctc_loss.out (IntArrayRef lengths overload) to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__ctc_loss_out(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::_ctc_loss_out(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_ctc_loss.Tensor_out (Tensor lengths overload) to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_Tensor_out__ctc_loss_out(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::_ctc_loss_Tensor_out(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_ctc_loss_backward.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__ctc_loss_backward_out(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_ctc_loss_backward_out(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::diag_embed.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_diag_embed_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::diag_embed_out(self, offset, dim1, dim2, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::diagonal to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__diagonal(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
  // No device check
  // DeviceGuard omitted
  return at::native::diagonal(self, offset, dim1, dim2);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::diagonal_backward to the symint-aware at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__diagonal_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
  // No device check
  // DeviceGuard omitted
  return at::native::diagonal_backward_symint(grad_output, input_sizes, offset, dim1, dim2);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::diagonal_backward.out to the symint-aware at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_diagonal_backward_out(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::diagonal_backward_out_symint(grad_output, input_sizes, offset, dim1, dim2, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::div.Scalar to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_div(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::div(self, other);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::div.Scalar_out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_div_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::div_Scalar_out(self, other, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards in-place aten::div_.Scalar to the at::native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_div_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::div_(self, other);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::div.Scalar_mode (with rounding_mode) to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_mode_div(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::div(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::div.Scalar_mode_out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_mode_out_div_out(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::div_Scalar_mode_out(self, other, rounding_mode, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards in-place aten::div_.Scalar_mode to the at::native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_mode_div_(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::div_(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::dot.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_dot_out(const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::dot_out(self, tensor, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::vdot.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_vdot_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::vdot_out(self, other, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::embedding to the symint-aware at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__embedding(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_symint(weight, indices, padding_idx, scale_grad_by_freq, sparse);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::embedding.out to the symint-aware at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_embedding_out(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_out_symint(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::embedding_dense_backward.out to the symint-aware at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_embedding_dense_backward_out(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_dense_backward_out_symint(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards the functional variant aten::embedding_renorm to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__embedding_renorm(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_renorm(self, indices, max_norm, norm_type);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::embedding_renorm.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out_embedding_renorm_out(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_renorm_out(self, indices, max_norm, norm_type, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_embedding_bag_forward_only.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__embedding_bag_forward_only_out(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
  // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_forward_only_out(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_embedding_bag.out to the at::native out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__embedding_bag_out(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
  // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_out(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_embedding_bag_dense_backward.out to the symint-aware at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__embedding_bag_dense_backward_out(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_dense_backward_out_symint(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::_embedding_bag_per_sample_weights_backward.out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__embedding_bag_per_sample_weights_backward_out(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_per_sample_weights_backward_out(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::empty.names (named-tensor overload) to the at::native kernel.
at::Tensor wrapper_CompositeExplicitAutograd_names_empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_names(size, names, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
// Generated registration stub: forwards aten::empty.names_out to the at::native out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_names_out_empty_out(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_names_out(size, names, memory_format, out);
}
} // anonymous namespace
2006 | namespace { |
2007 | at::Tensor wrapper_CompositeExplicitAutograd__new_empty(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
2008 | // No device check |
2009 | // DeviceGuard omitted |
2010 | return at::native::new_empty_symint(self, size, dtype, layout, device, pin_memory); |
2011 | } |
2012 | } // anonymous namespace |
2013 | namespace { |
2014 | at::Tensor & wrapper_CompositeExplicitAutograd_out_new_empty_out(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
2015 | // No device check |
2016 | // DeviceGuard omitted |
2017 | return at::native::new_empty_out_symint(self, size, out); |
2018 | } |
2019 | } // anonymous namespace |
2020 | namespace { |
2021 | at::Tensor & wrapper_CompositeExplicitAutograd_out_new_empty_strided_out(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { |
2022 | // No device check |
2023 | // DeviceGuard omitted |
2024 | return at::native::new_empty_strided_out_symint(self, size, stride, out); |
2025 | } |
2026 | } // anonymous namespace |
2027 | namespace { |
2028 | at::Tensor wrapper_CompositeExplicitAutograd__new_full(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
2029 | // No device check |
2030 | // DeviceGuard omitted |
2031 | return at::native::new_full(self, C10_AS_INTARRAYREF_SLOW(size), fill_value, dtype, layout, device, pin_memory); |
2032 | } |
2033 | } // anonymous namespace |
2034 | namespace { |
2035 | at::Tensor & wrapper_CompositeExplicitAutograd_out_new_full_out(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { |
2036 | // No device check |
2037 | // DeviceGuard omitted |
2038 | return at::native::new_full_out_symint(self, size, fill_value, out); |
2039 | } |
2040 | } // anonymous namespace |
2041 | namespace { |
2042 | at::Tensor wrapper_CompositeExplicitAutograd__new_zeros(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
2043 | // No device check |
2044 | // DeviceGuard omitted |
2045 | return at::native::new_zeros(self, C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory); |
2046 | } |
2047 | } // anonymous namespace |
2048 | namespace { |
2049 | at::Tensor & wrapper_CompositeExplicitAutograd_out_new_zeros_out(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
2050 | // No device check |
2051 | // DeviceGuard omitted |
2052 | return at::native::new_zeros_out_symint(self, size, out); |
2053 | } |
2054 | } // anonymous namespace |
2055 | namespace { |
2056 | at::Tensor wrapper_CompositeExplicitAutograd__new_ones(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
2057 | // No device check |
2058 | // DeviceGuard omitted |
2059 | return at::native::new_ones(self, C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory); |
2060 | } |
2061 | } // anonymous namespace |
2062 | namespace { |
2063 | at::Tensor & wrapper_CompositeExplicitAutograd_out_new_ones_out(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
2064 | // No device check |
2065 | // DeviceGuard omitted |
2066 | return at::native::new_ones_out_symint(self, size, out); |
2067 | } |
2068 | } // anonymous namespace |
// --- Generated CompositeExplicitAutograd wrappers: quantized empty,
// resize, empty_like, empty_strided ---
// Pure pass-through registration shims; each forwards verbatim to the
// at::native kernel named in its return statement. @generated — do not
// hand-edit.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__empty_affine_quantized_out(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_empty_affine_quantized_out(size, scale, zero_point, memory_format, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__empty_per_channel_affine_quantized_out(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_empty_per_channel_affine_quantized_out(size, scales, zero_points, axis, memory_format, out);
}
} // anonymous namespace
namespace {
// Functional variant of resize (symint-aware native kernel).
at::Tensor wrapper_CompositeExplicitAutograd__resize(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::resize_symint(self, size, memory_format);
}
} // anonymous namespace
namespace {
// resize.out: out is const Tensor& per the out-variant schema for this op.
const at::Tensor & wrapper_CompositeExplicitAutograd_out_resize_out(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::resize_out_symint(self, size, memory_format, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___resize_output(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
// No device check
// DeviceGuard omitted
return at::native::_resize_output(self, size, device);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeExplicitAutograd_out__resize_output_out(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_resize_output_out(self, size, device, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_empty_quantized_out(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::empty_quantized_out(size, qtensor, memory_format, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_empty_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::empty_like_out(self, memory_format, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_empty_strided_out(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::empty_strided_out_symint(size, stride, out);
}
} // anonymous namespace
// --- Generated CompositeExplicitAutograd wrappers: expand, eye, fill,
// full, full_like, from_file ---
// Pass-through registration shims. Wrappers calling C10_AS_INTARRAYREF_SLOW
// lower SymInt sizes to concrete ints because the target native kernel is
// not symint-aware. @generated — do not hand-edit.
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
// No device check
// DeviceGuard omitted
return at::native::expand(self, C10_AS_INTARRAYREF_SLOW(size), implicit);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__eye(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::eye(n, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// eye.m overload (explicit row/column counts).
at::Tensor wrapper_CompositeExplicitAutograd_m_eye(int64_t n, int64_t m, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::eye(n, m, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_fill(const at::Tensor & self, const at::Scalar & value) {
// No device check
// DeviceGuard omitted
return at::native::fill(self, value);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_fill_out(const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::fill_Scalar_out(self, value, out);
}
} // anonymous namespace
namespace {
// fill.Tensor overload — value supplied as a (0-dim) tensor; overload
// resolution on at::native::fill picks the Tensor variant.
at::Tensor wrapper_CompositeExplicitAutograd_Tensor_fill(const at::Tensor & self, const at::Tensor & value) {
// No device check
// DeviceGuard omitted
return at::native::fill(self, value);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_out_fill_out(const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::fill_Tensor_out(self, value, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_names_full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::full(size, fill_value, names, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_names_out_full_out(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::full_names_out(size, fill_value, names, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__full(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::full(C10_AS_INTARRAYREF_SLOW(size), fill_value, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_full_out(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::full_out(C10_AS_INTARRAYREF_SLOW(size), fill_value, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__full_like(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::full_like(self, fill_value, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_full_like_out(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::full_like_out(self, fill_value, memory_format, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_from_file_out(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::from_file_out(filename, shared, size, out);
}
} // anonymous namespace
// --- Generated CompositeExplicitAutograd wrappers: grid_sampler 2d/3d
// forward/backward out-variants and the 2d CPU fallback ---
// Pass-through registration shims. Backward out-variants return a tuple of
// references to the caller-provided output tensors. @generated — do not
// hand-edit.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_grid_sampler_2d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::grid_sampler_2d_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_grid_sampler_2d_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
// No device check
// DeviceGuard omitted
return at::native::grid_sampler_2d_backward_out(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___grid_sampler_2d_cpu_fallback(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
// No device check
// DeviceGuard omitted
return at::native::_grid_sampler_2d_cpu_fallback(input, grid, interpolation_mode, padding_mode, align_corners);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__grid_sampler_2d_cpu_fallback_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_grid_sampler_2d_cpu_fallback_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_grid_sampler_3d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::grid_sampler_3d_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_grid_sampler_3d_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
// No device check
// DeviceGuard omitted
return at::native::grid_sampler_3d_backward_out(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}
} // anonymous namespace
// --- Generated CompositeExplicitAutograd wrappers: window-function
// factories (hann / hamming / kaiser) and their out-variants ---
// One wrapper per schema overload (base, .periodic, .periodic_alpha,
// .periodic_alpha_beta, .beta); functional overloads dispatch to the
// matching at::native overload by argument list, out-variants call the
// explicitly named *_out kernel. @generated — do not hand-edit.
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__hann_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::hann_window(window_length, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_hann_window_out(int64_t window_length, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::hann_window_out(window_length, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_periodic_hann_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::hann_window(window_length, periodic, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_periodic_out_hann_window_out(int64_t window_length, bool periodic, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::hann_window_periodic_out(window_length, periodic, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__hamming_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::hamming_window(window_length, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_hamming_window_out(int64_t window_length, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::hamming_window_out(window_length, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_periodic_hamming_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::hamming_window(window_length, periodic, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_periodic_out_hamming_window_out(int64_t window_length, bool periodic, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::hamming_window_periodic_out(window_length, periodic, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_periodic_alpha_hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::hamming_window(window_length, periodic, alpha, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_periodic_alpha_out_hamming_window_out(int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::hamming_window_periodic_alpha_out(window_length, periodic, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_periodic_alpha_beta_hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::hamming_window(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_periodic_alpha_beta_out_hamming_window_out(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::hamming_window_periodic_alpha_beta_out(window_length, periodic, alpha, beta, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__kaiser_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::kaiser_window(window_length, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_kaiser_window_out(int64_t window_length, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::kaiser_window_out(window_length, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_periodic_kaiser_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::kaiser_window(window_length, periodic, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_periodic_out_kaiser_window_out(int64_t window_length, bool periodic, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::kaiser_window_periodic_out(window_length, periodic, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_beta_kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::kaiser_window(window_length, periodic, beta, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_beta_out_kaiser_window_out(int64_t window_length, bool periodic, double beta, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::kaiser_window_beta_out(window_length, periodic, beta, out);
}
} // anonymous namespace
// --- Generated CompositeExplicitAutograd wrappers: group_norm,
// index_put, isnan, is_same_size, kthvalue, layer_norm, nan_to_num ---
// Pass-through registration shims; symint-aware kernels receive SymInts
// directly, others get them lowered (.expect_int() asserts the SymInt holds
// a concrete integer). @generated — do not hand-edit.
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__native_group_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
// No device check
// DeviceGuard omitted
// math_group_norm is not symint-aware: expect_int() unwraps each SymInt
// (errors on genuinely symbolic values).
return at::native::math_group_norm(input, weight, bias, N.expect_int(), C.expect_int(), HxW.expect_int(), group, eps);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_native_group_norm_out(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
// No device check
// DeviceGuard omitted
return at::native::native_group_norm_out_symint(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_native_group_norm_backward_out(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
// No device check
// DeviceGuard omitted
return at::native::native_group_norm_backward_out_symint(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__index_put(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
// No device check
// DeviceGuard omitted
return at::native::index_put(self, indices, values, accumulate);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_index_put_out(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::index_put_out(self, indices, values, accumulate, out);
}
} // anonymous namespace
namespace {
// In-place variant: mutates and returns self.
at::Tensor & wrapper_CompositeExplicitAutograd__index_put_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
// No device check
// DeviceGuard omitted
return at::native::index_put_(self, indices, values, accumulate);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___index_put_impl(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
// No device check
// DeviceGuard omitted
return at::native::_index_put_impl(self, indices, values, accumulate, unsafe);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__index_put_impl_out(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_index_put_impl_out(self, indices, values, accumulate, unsafe, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_isnan_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::isnan_out(self, out);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeExplicitAutograd__is_same_size(const at::Tensor & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::is_same_size(self, other);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__kthvalue(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::kthvalue(self, k, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__native_layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
// No device check
// DeviceGuard omitted
return at::native::math_native_layer_norm(input, C10_AS_INTARRAYREF_SLOW(normalized_shape), weight, bias, eps);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_native_layer_norm_out(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
// No device check
// DeviceGuard omitted
return at::native::native_layer_norm_out_symint(input, normalized_shape, weight, bias, eps, out0, out1, out2);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_native_layer_norm_backward_out(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
// No device check
// DeviceGuard omitted
return at::native::native_layer_norm_backward_out_symint(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__nan_to_num(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
// No device check
// DeviceGuard omitted
return at::native::nan_to_num(self, nan, posinf, neginf);
}
} // anonymous namespace
namespace {
// In-place variant: mutates and returns self.
at::Tensor & wrapper_CompositeExplicitAutograd__nan_to_num_(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
// No device check
// DeviceGuard omitted
return at::native::nan_to_num_(self, nan, posinf, neginf);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: linear.out, forwarded to at::native::linear_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_linear_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linear_out(input, weight, bias, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: linear_backward.out; output_mask selects which of
// the three gradient outputs (out0/out1/out2) are computed.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_linear_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::linear_backward_out(self, grad_output, weight, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_linear.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_linear_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_linear_out(self, weight, bias, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_linear_backward_input.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_input_out(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_linear_backward_input_out(input_size, grad_output, weight, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_linear_backward_weights.out; writes the
// weight and (if bias_defined) bias gradients into out0/out1.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_weights_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_linear_backward_weights_out(grad_output, input, weight, bias_defined, out0, out1);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_linear_backward.out, forwarded to at::native.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_linear_backward_out(self, grad_output, weight, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: linspace factory; the four optional TensorOptions
// components (dtype/layout/device/pin_memory) are passed through unpacked.
at::Tensor wrapper_CompositeExplicitAutograd__linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::linspace(start, end, steps, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: xlogy.Scalar_Self (scalar self, tensor other).
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_Self_xlogy(const at::Scalar & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::xlogy(self, other);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: xlogy.OutScalar_Self (scalar self, tensor other, out).
at::Tensor & wrapper_CompositeExplicitAutograd_OutScalar_Self_xlogy_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::xlogy_out(self, other, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: xlogy.Scalar_Other (tensor self, scalar other).
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_Other_xlogy(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::xlogy(self, other);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: xlogy.OutScalar_Other (tensor self, scalar other, out).
at::Tensor & wrapper_CompositeExplicitAutograd_OutScalar_Other_xlogy_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::xlogy_out(self, other, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: in-place xlogy_.Scalar_Other.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_Other_xlogy_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::xlogy_(self, other);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: logspace factory with explicit base and unpacked
// TensorOptions components.
at::Tensor wrapper_CompositeExplicitAutograd__logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::logspace(start, end, steps, base, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: log_softmax.int_out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_int_out_log_softmax_out(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_softmax_out(self, dim, dtype, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: functional logcumsumexp, forwarded to at::native.
at::Tensor wrapper_CompositeExplicitAutograd__logcumsumexp(const at::Tensor & self, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::logcumsumexp(self, dim);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: logcumsumexp.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_logcumsumexp_out(const at::Tensor & self, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logcumsumexp_out(self, dim, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: logsumexp over the given dims, forwarded to at::native.
at::Tensor wrapper_CompositeExplicitAutograd__logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::logsumexp(self, dim, keepdim);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: matmul_backward.out; mask selects which of the two
// input gradients (out0/out1) are computed.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_matmul_backward_out(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::matmul_backward_out(grad, self, other, mask, out0, out1);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: full-tensor _aminmax.out (min into out0, max into out1).
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__aminmax_out(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::_aminmax_out(self, out0, out1);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: per-dim _aminmax.dim_out, forwarded to at::native.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_dim_out__aminmax_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::_aminmax_dim_out(self, dim, keepdim, out0, out1);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: _mps_max_pool2d.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out__mps_max_pool2d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_mps_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mps_max_pool2d_backward.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mps_max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mps_max_pool2d_backward_out(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_max_pool2d.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool2d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_max_pool2d_backward.out; takes both the
// forward output and the original input in addition to grad_output.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_max_pool2d_backward_out(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_max_pool3d.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool3d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_max_pool3d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_max_pool3d_backward.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_max_pool3d_backward_out(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: quantized_max_pool1d.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_quantized_max_pool1d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_max_pool1d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: quantized_max_pool2d.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_quantized_max_pool2d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: full-reduction mean with optional output dtype.
at::Tensor wrapper_CompositeExplicitAutograd__mean(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::mean(self, dtype);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: full-reduction median.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_median_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::median_out(self, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: per-dim median.dim returning (values, indices).
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd_dim_median(const at::Tensor & self, int64_t dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::median(self, dim, keepdim);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: full-reduction nanmedian.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_nanmedian_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanmedian_out(self, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: per-dim nanmedian.dim returning (values, indices).
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd_dim_nanmedian(const at::Tensor & self, int64_t dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanmedian(self, dim, keepdim);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: _mps_convolution.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out__mps_convolution_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_mps_convolution_out(self, weight, bias, padding, stride, dilation, groups, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mps_convolution_backward.out; output_mask selects
// which of out0/out1/out2 are computed.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_mps_convolution_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::mps_convolution_backward_out(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: functional mkldnn_convolution. The native kernel
// takes concrete ints, so the SymInt padding is materialized via C10_AS_INTARRAYREF_SLOW.
at::Tensor wrapper_CompositeExplicitAutograd__mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_convolution(self, weight, bias, C10_AS_INTARRAYREF_SLOW(padding), stride, dilation, groups);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_convolution.out; unlike the functional
// variant above, the out kernel has a SymInt overload, so padding passes through as-is.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_convolution_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_convolution_out_symint(self, weight, bias, padding, stride, dilation, groups, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_rnn_layer.out, forwarded to at::native with
// four output tensors.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_mkldnn_rnn_layer_out(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_rnn_layer_out(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mkldnn_rnn_layer_backward.out, forwarded to
// at::native with seven output tensors.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_mkldnn_rnn_layer_backward_out(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_rnn_layer_backward_out(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: miopen_batch_norm.out, forwarded to at::native.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_miopen_batch_norm_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::miopen_batch_norm_out(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: miopen_batch_norm_backward.out, forwarded to at::native.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_miopen_batch_norm_backward_out(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::miopen_batch_norm_backward_out(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: miopen_convolution.out (SymInt padding), forwarded
// to the at::native symint out-variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_miopen_convolution_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::miopen_convolution_out_symint(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: miopen_convolution_transpose.out (SymInt padding and
// output_padding), forwarded to the at::native symint out-variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_miopen_convolution_transpose_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::miopen_convolution_transpose_out_symint(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: miopen_depthwise_convolution.out (SymInt padding),
// forwarded to the at::native symint out-variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_miopen_depthwise_convolution_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::miopen_depthwise_convolution_out_symint(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: miopen_rnn.out, forwarded to at::native with five
// output tensors.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_miopen_rnn_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
  // No device check
  // DeviceGuard omitted
  return at::native::miopen_rnn_out(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: miopen_rnn_backward.out. Returns void: all results
// are written into out0..out2 and the TensorList out3 (per-weight gradients).
void wrapper_CompositeExplicitAutograd_out_miopen_rnn_backward_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
  // No device check
  // DeviceGuard omitted
  return at::native::miopen_rnn_backward_out(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: _sparse_sparse_matmul.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_sparse_matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_sparse_matmul_out(self, other, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mode.values out-variant, writing results into
// values/indices.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_values_mode_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::mode_out(self, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mul.Scalar (functional), forwarded to at::native.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_mul(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul(self, other);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mul.Scalar_out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_mul_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_Scalar_out(self, other, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: in-place mul_.Scalar, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_mul_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_(self, other);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: matrix-vector product mv, forwarded to at::native.
at::Tensor wrapper_CompositeExplicitAutograd__mv(const at::Tensor & self, const at::Tensor & vec) {
  // No device check
  // DeviceGuard omitted
  return at::native::mv(self, vec);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: mv.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_mv_out(const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mv_out(self, vec, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: multivariate log-gamma mvlgamma, forwarded to at::native.
at::Tensor wrapper_CompositeExplicitAutograd__mvlgamma(const at::Tensor & self, int64_t p) {
  // No device check
  // DeviceGuard omitted
  return at::native::mvlgamma(self, p);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: in-place mvlgamma_, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd__mvlgamma_(at::Tensor & self, int64_t p) {
  // No device check
  // DeviceGuard omitted
  return at::native::mvlgamma_(self, p);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: functionalized _native_batch_norm_legit — returns
// updated running stats as values instead of mutating them in place.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd___native_batch_norm_legit_functional(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::_native_batch_norm_legit_functional(input, weight, bias, running_mean, running_var, training, momentum, eps);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: batch_norm_stats.out (mean into out0, invstd into out1).
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_batch_norm_stats_out(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm_stats_out(input, eps, out0, out1);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: batch_norm_gather_stats.out with a single shared count.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_batch_norm_gather_stats_out(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm_gather_stats_out(input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: batch_norm_gather_stats_with_counts.out — like the
// wrapper above but with a per-replica counts tensor instead of one int64 count.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_batch_norm_gather_stats_with_counts_out(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm_gather_stats_with_counts_out(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: native_batch_norm_backward.out; output_mask selects
// which of out0/out1/out2 are computed.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_native_batch_norm_backward_out(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::native_batch_norm_backward_out(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: batch_norm_backward_reduce.out with four outputs;
// input_g/weight_g/bias_g select which gradient reductions are produced.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_batch_norm_backward_reduce_out(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm_backward_reduce_out(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: batch_norm_backward_elemt.out, forwarded to at::native.
at::Tensor & wrapper_CompositeExplicitAutograd_out_batch_norm_backward_elemt_out(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm_backward_elemt_out(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: batch_norm_update_stats.out, forwarded to at::native.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_batch_norm_update_stats_out(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm_update_stats_out(input, running_mean, running_var, momentum, out0, out1);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: functional _nnpack_spatial_convolution. The native
// kernel takes concrete ints, so SymInt padding is materialized via C10_AS_INTARRAYREF_SLOW.
at::Tensor wrapper_CompositeExplicitAutograd___nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride) {
  // No device check
  // DeviceGuard omitted
  return at::native::_nnpack_spatial_convolution(input, weight, bias, C10_AS_INTARRAYREF_SLOW(padding), stride);
}
} // anonymous namespace
namespace {
// CompositeExplicitAutograd kernel: _nnpack_spatial_convolution.out; the out kernel has a
// SymInt overload, so padding passes through unconverted.
at::Tensor & wrapper_CompositeExplicitAutograd_out__nnpack_spatial_convolution_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_nnpack_spatial_convolution_out_symint(input, weight, bias, padding, stride, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards ones.names to at::native::ones.
at::Tensor wrapper_CompositeExplicitAutograd_names_ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::ones(size, names, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards ones.names_out to at::native::ones_names_out.
at::Tensor & wrapper_CompositeExplicitAutograd_names_out_ones_out(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::ones_names_out(size, names, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards to at::native::ones.
at::Tensor wrapper_CompositeExplicitAutograd__ones(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::ones(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards to at::native::ones_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_ones_out(c10::SymIntArrayRef size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::ones_out(C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards ones_like to at::native::ones_like.
at::Tensor wrapper_CompositeExplicitAutograd__ones_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::ones_like(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
// Generated glue: forwards ones_like.out to at::native::ones_like_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_ones_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::ones_like_out(self, memory_format, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _euclidean_dist to at::native::_euclidean_dist.
at::Tensor wrapper_CompositeExplicitAutograd___euclidean_dist(const at::Tensor & x1, const at::Tensor & x2) {
// No device check
// DeviceGuard omitted
return at::native::_euclidean_dist(x1, x2);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _euclidean_dist.out to at::native::_euclidean_dist_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__euclidean_dist_out(const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_euclidean_dist_out(x1, x2, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _cdist_forward.out to at::native::_cdist_forward_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__cdist_forward_out(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_cdist_forward_out(x1, x2, p, compute_mode, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _cdist_backward.out to at::native::_cdist_backward_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__cdist_backward_out(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_cdist_backward_out(grad, x1, x2, p, cdist, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _pdist_forward.out to at::native::_pdist_forward_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__pdist_forward_out(const at::Tensor & self, double p, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_pdist_forward_out(self, p, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _pdist_backward.out to at::native::_pdist_backward_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__pdist_backward_out(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_pdist_backward_out(grad, self, p, pdist, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards permute to at::native::permute.
at::Tensor wrapper_CompositeExplicitAutograd__permute(const at::Tensor & self, at::IntArrayRef dims) {
// No device check
// DeviceGuard omitted
return at::native::permute(self, dims);
}
} // anonymous namespace
namespace {
// Generated glue: forwards pixel_shuffle.out to at::native::pixel_shuffle_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_pixel_shuffle_out(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::pixel_shuffle_out(self, upscale_factor, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards pixel_unshuffle.out to at::native::pixel_unshuffle_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_pixel_unshuffle_out(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::pixel_unshuffle_out(self, downscale_factor, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards channel_shuffle.out to at::native::channel_shuffle_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_channel_shuffle_out(const at::Tensor & self, int64_t groups, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::channel_shuffle_out(self, groups, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards is_pinned to the default backend implementation at::native::is_pinned_default.
bool wrapper_CompositeExplicitAutograd__is_pinned(const at::Tensor & self, c10::optional<at::Device> device) {
// No device check
// DeviceGuard omitted
return at::native::is_pinned_default(self, device);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _pin_memory.out to at::native::_pin_memory_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__pin_memory_out(const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_pin_memory_out(self, device, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards rad2deg to at::native::rad2deg.
at::Tensor wrapper_CompositeExplicitAutograd__rad2deg(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::rad2deg(self);
}
} // anonymous namespace
namespace {
// Generated glue: forwards rad2deg.out to at::native::rad2deg_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::rad2deg_out(self, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards in-place rad2deg_ to at::native::rad2deg_.
at::Tensor & wrapper_CompositeExplicitAutograd__rad2deg_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::rad2deg_(self);
}
} // anonymous namespace
namespace {
// Generated glue: forwards deg2rad to at::native::deg2rad.
at::Tensor wrapper_CompositeExplicitAutograd__deg2rad(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::deg2rad(self);
}
} // anonymous namespace
namespace {
// Generated glue: forwards deg2rad.out to at::native::deg2rad_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::deg2rad_out(self, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards in-place deg2rad_ to at::native::deg2rad_.
at::Tensor & wrapper_CompositeExplicitAutograd__deg2rad_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::deg2rad_(self);
}
} // anonymous namespace
namespace {
// Generated glue: forwards scalar_tensor to at::native::scalar_tensor.
at::Tensor wrapper_CompositeExplicitAutograd__scalar_tensor(const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::scalar_tensor(s, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards scalar_tensor.out to at::native::scalar_tensor_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_scalar_tensor_out(const at::Scalar & s, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::scalar_tensor_out(s, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards rand.names to at::native::rand.
at::Tensor wrapper_CompositeExplicitAutograd_names_rand(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::rand(C10_AS_INTARRAYREF_SLOW(size), names, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards rand.names_out (SymInt-preserving) to at::native::rand_names_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_names_out_rand_out(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::rand_names_out_symint(size, names, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards rand.generator_with_names to at::native::rand.
at::Tensor wrapper_CompositeExplicitAutograd_generator_with_names_rand(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::rand(C10_AS_INTARRAYREF_SLOW(size), generator, names, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards rand.generator_with_names_out (SymInt-preserving) to at::native::rand_generator_with_names_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_generator_with_names_out_rand_out(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::rand_generator_with_names_out_symint(size, generator, names, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards to at::native::rand.
at::Tensor wrapper_CompositeExplicitAutograd__rand(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::rand(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards to at::native::rand_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_rand_out(c10::SymIntArrayRef size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::rand_out(C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards rand.generator to at::native::rand.
at::Tensor wrapper_CompositeExplicitAutograd_generator_rand(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::rand(C10_AS_INTARRAYREF_SLOW(size), generator, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards rand_like to at::native::rand_like.
at::Tensor wrapper_CompositeExplicitAutograd__rand_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::rand_like(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
// Generated glue: forwards rand_like.out to at::native::rand_like_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_rand_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::rand_like_out(self, memory_format, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards to at::native::randint.
at::Tensor wrapper_CompositeExplicitAutograd__randint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randint(high, C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards to at::native::randint_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_randint_out(int64_t high, c10::SymIntArrayRef size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randint_out(high, C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randint.generator to at::native::randint.
at::Tensor wrapper_CompositeExplicitAutograd_generator_randint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randint(high, C10_AS_INTARRAYREF_SLOW(size), generator, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randint.generator_out to at::native::randint_out.
at::Tensor & wrapper_CompositeExplicitAutograd_generator_out_randint_out(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randint_out(high, C10_AS_INTARRAYREF_SLOW(size), generator, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randint.low to at::native::randint.
at::Tensor wrapper_CompositeExplicitAutograd_low_randint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randint(low, high, C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randint.low_out to at::native::randint_out.
at::Tensor & wrapper_CompositeExplicitAutograd_low_out_randint_out(int64_t low, int64_t high, c10::SymIntArrayRef size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randint_out(low, high, C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randint.low_generator to at::native::randint.
at::Tensor wrapper_CompositeExplicitAutograd_low_generator_randint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randint(low, high, C10_AS_INTARRAYREF_SLOW(size), generator, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randint.low_generator_out to at::native::randint_out.
at::Tensor & wrapper_CompositeExplicitAutograd_low_generator_out_randint_out(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randint_out(low, high, C10_AS_INTARRAYREF_SLOW(size), generator, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randint_like to at::native::randint_like.
at::Tensor wrapper_CompositeExplicitAutograd__randint_like(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::randint_like(self, high, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randint_like.out to at::native::randint_like_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_randint_like_out(const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randint_like_out(self, high, memory_format, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randint_like.low_dtype to at::native::randint_like.
at::Tensor wrapper_CompositeExplicitAutograd_low_dtype_randint_like(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::randint_like(self, low, high, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randint_like.low_dtype_out to at::native::randint_like_low_dtype_out.
at::Tensor & wrapper_CompositeExplicitAutograd_low_dtype_out_randint_like_out(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randint_like_low_dtype_out(self, low, high, memory_format, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards to at::native::randn.
at::Tensor wrapper_CompositeExplicitAutograd__randn(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randn(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randn.generator to at::native::randn.
at::Tensor wrapper_CompositeExplicitAutograd_generator_randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randn(C10_AS_INTARRAYREF_SLOW(size), generator, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randn.names to at::native::randn.
at::Tensor wrapper_CompositeExplicitAutograd_names_randn(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randn(C10_AS_INTARRAYREF_SLOW(size), names, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randn.names_out (SymInt-preserving) to at::native::randn_names_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_names_out_randn_out(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randn_names_out_symint(size, names, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt size via C10_AS_INTARRAYREF_SLOW, then forwards randn.generator_with_names to at::native::randn.
at::Tensor wrapper_CompositeExplicitAutograd_generator_with_names_randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randn(C10_AS_INTARRAYREF_SLOW(size), generator, names, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randn.generator_with_names_out (SymInt-preserving) to at::native::randn_generator_with_names_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_generator_with_names_out_randn_out(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randn_generator_with_names_out_symint(size, generator, names, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randn_like to at::native::randn_like.
at::Tensor wrapper_CompositeExplicitAutograd__randn_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::randn_like(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randn_like.out to at::native::randn_like_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_randn_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randn_like_out(self, memory_format, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randperm to at::native::randperm.
at::Tensor wrapper_CompositeExplicitAutograd__randperm(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randperm(n, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randperm.out to at::native::randperm_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_randperm_out(int64_t n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randperm_out(n, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards randperm.generator to at::native::randperm.
at::Tensor wrapper_CompositeExplicitAutograd_generator_randperm(int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::randperm(n, generator, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards range.step to at::native::range.
at::Tensor wrapper_CompositeExplicitAutograd_step_range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::range(start, end, step, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards the no-step range overload to at::native::range.
at::Tensor wrapper_CompositeExplicitAutograd__range(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::range(start, end, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// Generated glue: forwards the no-step range.out overload to at::native::range_out_no_step.
at::Tensor & wrapper_CompositeExplicitAutograd_out__range_out(const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::range_out_no_step(start, end, out);
}
} // anonymous namespace
namespace {
// Generated glue: materializes SymInt repeats via C10_AS_INTARRAYREF_SLOW, then forwards to at::native::repeat.
at::Tensor wrapper_CompositeExplicitAutograd__repeat(const at::Tensor & self, c10::SymIntArrayRef repeats) {
// No device check
// DeviceGuard omitted
return at::native::repeat(self, C10_AS_INTARRAYREF_SLOW(repeats));
}
} // anonymous namespace
namespace {
// Generated glue: forwards repeat.out (SymInt-preserving) to at::native::repeat_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_out_repeat_out(const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::repeat_out_symint(self, repeats, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards repeat_interleave.Tensor_out to at::native::repeat_interleave_Tensor_out.
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_out_repeat_interleave_out(const at::Tensor & repeats, c10::optional<int64_t> output_size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::repeat_interleave_Tensor_out(repeats, output_size, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _reshape_copy (SymInt-preserving) to at::native::_reshape_copy_symint.
at::Tensor wrapper_CompositeExplicitAutograd___reshape_copy(const at::Tensor & self, c10::SymIntArrayRef size) {
// No device check
// DeviceGuard omitted
return at::native::_reshape_copy_symint(self, size);
}
} // anonymous namespace
namespace {
// Generated glue: forwards _mkldnn_reshape.out to at::native::_mkldnn_reshape_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__mkldnn_reshape_out(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_mkldnn_reshape_out(self, shape, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards relu.out to at::native::relu_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_relu_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::relu_out(self, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards select.int (SymInt-preserving index) to at::native::select_symint.
at::Tensor wrapper_CompositeExplicitAutograd_int_select(const at::Tensor & self, int64_t dim, c10::SymInt index) {
// No device check
// DeviceGuard omitted
return at::native::select_symint(self, dim, index);
}
} // anonymous namespace
namespace {
// Generated glue: forwards select_backward.out (SymInt-preserving) to at::native::select_backward_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_out_select_backward_out(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::select_backward_out_symint(grad_output, input_sizes, dim, index, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards celu to at::native::celu.
at::Tensor wrapper_CompositeExplicitAutograd__celu(const at::Tensor & self, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::celu(self, alpha);
}
} // anonymous namespace
namespace {
// Generated glue: forwards celu.out to at::native::celu_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_celu_out(const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::celu_out(self, alpha, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards in-place celu_ to at::native::celu_.
at::Tensor & wrapper_CompositeExplicitAutograd__celu_(at::Tensor & self, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::celu_(self, alpha);
}
} // anonymous namespace
namespace {
// Generated glue: forwards detach to at::native::detach.
at::Tensor wrapper_CompositeExplicitAutograd__detach(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::detach(self);
}
} // anonymous namespace
namespace {
// Generated glue: forwards in-place detach_ to at::native::detach_.
at::Tensor & wrapper_CompositeExplicitAutograd__detach_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::detach_(self);
}
} // anonymous namespace
namespace {
// Generated glue for slice.Tensor: the native kernel takes concrete ints, so each
// optional SymInt bound is unwrapped with expect_int() (preserving absent values).
at::Tensor wrapper_CompositeExplicitAutograd_Tensor_slice(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
// No device check
// DeviceGuard omitted
return at::native::slice(self, dim, start.has_value() ? c10::make_optional(start->expect_int()) : c10::nullopt, end.has_value() ? c10::make_optional(end->expect_int()) : c10::nullopt, step.expect_int());
}
} // anonymous namespace
namespace {
// Generated glue for slice_backward: converts SymInt sizes (C10_AS_INTARRAYREF_SLOW)
// and SymInt scalars (expect_int) to concrete ints for the native kernel.
at::Tensor wrapper_CompositeExplicitAutograd__slice_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
// No device check
// DeviceGuard omitted
return at::native::slice_backward(grad_output, C10_AS_INTARRAYREF_SLOW(input_sizes), dim, start.expect_int(), end.expect_int(), step.expect_int());
}
} // anonymous namespace
namespace {
// Generated glue: forwards slice_backward.out (SymInt-preserving) to at::native::slice_backward_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_out_slice_backward_out(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::slice_backward_out_symint(grad_output, input_sizes, dim, start, end, step, out);
}
} // anonymous namespace
namespace {
// Generated glue for slice_scatter: the native kernel takes concrete ints, so each
// optional SymInt bound is unwrapped with expect_int() (preserving absent values).
at::Tensor wrapper_CompositeExplicitAutograd__slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
// No device check
// DeviceGuard omitted
return at::native::slice_scatter(self, src, dim, start.has_value() ? c10::make_optional(start->expect_int()) : c10::nullopt, end.has_value() ? c10::make_optional(end->expect_int()) : c10::nullopt, step.expect_int());
}
} // anonymous namespace
namespace {
// Generated glue: forwards slice_scatter.out (SymInt-preserving) to at::native::slice_scatter_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_out_slice_scatter_out(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::slice_scatter_out_symint(self, src, dim, start, end, step, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards select_scatter (SymInt-preserving index) to at::native::select_scatter_symint.
at::Tensor wrapper_CompositeExplicitAutograd__select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
// No device check
// DeviceGuard omitted
return at::native::select_scatter_symint(self, src, dim, index);
}
} // anonymous namespace
namespace {
// Generated glue: forwards select_scatter.out (SymInt-preserving) to at::native::select_scatter_out_symint.
at::Tensor & wrapper_CompositeExplicitAutograd_out_select_scatter_out(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::select_scatter_out_symint(self, src, dim, index, out);
}
} // anonymous namespace
namespace {
// Generated glue: forwards diagonal_scatter to at::native::diagonal_scatter.
at::Tensor wrapper_CompositeExplicitAutograd__diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
// No device check
// DeviceGuard omitted
return at::native::diagonal_scatter(self, src, offset, dim1, dim2);
}
} // anonymous namespace
3546 | namespace { |
3547 | at::Tensor & wrapper_CompositeExplicitAutograd_out_diagonal_scatter_out(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { |
3548 | // No device check |
3549 | // DeviceGuard omitted |
3550 | return at::native::diagonal_scatter_out(self, src, offset, dim1, dim2, out); |
3551 | } |
3552 | } // anonymous namespace |
3553 | namespace { |
3554 | at::Tensor wrapper_CompositeExplicitAutograd__as_strided_scatter(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) { |
3555 | // No device check |
3556 | // DeviceGuard omitted |
3557 | return at::native::as_strided_scatter_symint(self, src, size, stride, storage_offset); |
3558 | } |
3559 | } // anonymous namespace |
3560 | namespace { |
3561 | at::Tensor & wrapper_CompositeExplicitAutograd_out_as_strided_scatter_out(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) { |
3562 | // No device check |
3563 | // DeviceGuard omitted |
3564 | return at::native::as_strided_scatter_out_symint(self, src, size, stride, storage_offset, out); |
3565 | } |
3566 | } // anonymous namespace |
namespace {
// aten::softmax.int_out — thin dispatch wrapper over at::native::softmax_out.
at::Tensor & wrapper_CompositeExplicitAutograd_int_out_softmax_out(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::softmax_out(self, dim, dtype, out);
}
} // anonymous namespace
namespace {
// aten::unsafe_split.Tensor — split_size is collapsed to a concrete int via expect_int()
// (symbolic sizes are not preserved by this kernel).
::std::vector<at::Tensor> wrapper_CompositeExplicitAutograd_Tensor_unsafe_split(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::unsafe_split(self, split_size.expect_int(), dim);
}
} // anonymous namespace
namespace {
// aten::unsafe_split.Tensor_out — forwards to the SymInt-aware out-variant kernel.
void wrapper_CompositeExplicitAutograd_Tensor_out_unsafe_split_out(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
    // No device check
  // DeviceGuard omitted
  return at::native::unsafe_split_Tensor_out_symint(self, split_size, dim, out);
}
} // anonymous namespace
namespace {
// aten::split.Tensor — split_size collapsed to a concrete int via expect_int().
::std::vector<at::Tensor> wrapper_CompositeExplicitAutograd_Tensor_split(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::split(self, split_size.expect_int(), dim);
}
} // anonymous namespace
namespace {
// aten::unsafe_split_with_sizes — SymInt sizes materialized via C10_AS_INTARRAYREF_SLOW.
::std::vector<at::Tensor> wrapper_CompositeExplicitAutograd__unsafe_split_with_sizes(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::unsafe_split_with_sizes(self, C10_AS_INTARRAYREF_SLOW(split_sizes), dim);
}
} // anonymous namespace
namespace {
// aten::unsafe_split_with_sizes.out — forwards to the SymInt-aware out-variant kernel.
void wrapper_CompositeExplicitAutograd_out_unsafe_split_with_sizes_out(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    // No device check
  // DeviceGuard omitted
  return at::native::unsafe_split_with_sizes_out_symint(self, split_sizes, dim, out);
}
} // anonymous namespace
namespace {
// aten::split_with_sizes — SymInt sizes materialized via C10_AS_INTARRAYREF_SLOW.
::std::vector<at::Tensor> wrapper_CompositeExplicitAutograd__split_with_sizes(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::split_with_sizes(self, C10_AS_INTARRAYREF_SLOW(split_sizes), dim);
}
} // anonymous namespace
namespace {
// aten::squeeze — thin dispatch wrapper over at::native::squeeze (all-dims overload).
at::Tensor wrapper_CompositeExplicitAutograd__squeeze(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::squeeze(self);
}
} // anonymous namespace
namespace {
// aten::squeeze_ — in-place variant; forwards to at::native::squeeze_.
at::Tensor & wrapper_CompositeExplicitAutograd__squeeze_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::squeeze_(self);
}
} // anonymous namespace
namespace {
// aten::squeeze.dim — single-dim overload; forwards to at::native::squeeze.
at::Tensor wrapper_CompositeExplicitAutograd_dim_squeeze(const at::Tensor & self, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::squeeze(self, dim);
}
} // anonymous namespace
namespace {
// aten::squeeze_.dim — in-place single-dim overload.
at::Tensor & wrapper_CompositeExplicitAutograd_dim_squeeze_(at::Tensor & self, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::squeeze_(self, dim);
}
} // anonymous namespace
namespace {
// aten::squeeze.dims — multi-dim overload; forwards to at::native::squeeze.
at::Tensor wrapper_CompositeExplicitAutograd_dims_squeeze(const at::Tensor & self, at::IntArrayRef dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::squeeze(self, dim);
}
} // anonymous namespace
namespace {
// aten::squeeze_.dims — in-place multi-dim overload.
at::Tensor & wrapper_CompositeExplicitAutograd_dims_squeeze_(at::Tensor & self, at::IntArrayRef dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::squeeze_(self, dim);
}
} // anonymous namespace
namespace {
// aten::stack — thin dispatch wrapper over at::native::stack.
at::Tensor wrapper_CompositeExplicitAutograd__stack(at::TensorList tensors, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::stack(tensors, dim);
}
} // anonymous namespace
namespace {
// aten::stack.out — thin dispatch wrapper over at::native::stack_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_stack_out(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::stack_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
// aten::_stack — thin dispatch wrapper over at::native::_stack.
at::Tensor wrapper_CompositeExplicitAutograd___stack(at::TensorList tensors, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::_stack(tensors, dim);
}
} // anonymous namespace
namespace {
// aten::_stack.out — thin dispatch wrapper over at::native::_stack_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__stack_out(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_stack_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
// aten::sum — full-reduction overload; forwards to at::native::sum.
at::Tensor wrapper_CompositeExplicitAutograd__sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::sum(self, dtype);
}
} // anonymous namespace
namespace {
// aten::sum.out — thin dispatch wrapper over at::native::sum_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_sum_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sum_out(self, dtype, out);
}
} // anonymous namespace
namespace {
// aten::std_mean.correction_out — forwards to at::native::std_mean_correction_out.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_correction_out_std_mean_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::std_mean_correction_out(self, dim, correction, keepdim, out0, out1);
}
} // anonymous namespace
namespace {
// aten::prod.out — full-reduction out variant; forwards to at::native::prod_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_prod_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::prod_out(self, dtype, out);
}
} // anonymous namespace
namespace {
// aten::t — thin dispatch wrapper over at::native::t.
at::Tensor wrapper_CompositeExplicitAutograd__t(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::t(self);
}
} // anonymous namespace
namespace {
// aten::t_ — in-place variant; forwards to at::native::t_.
at::Tensor & wrapper_CompositeExplicitAutograd__t_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::t_(self);
}
} // anonymous namespace
namespace {
// aten::transpose.int — thin dispatch wrapper over at::native::transpose.
at::Tensor wrapper_CompositeExplicitAutograd_int_transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
    // No device check
  // DeviceGuard omitted
  return at::native::transpose(self, dim0, dim1);
}
} // anonymous namespace
namespace {
// aten::transpose_ — in-place variant; forwards to at::native::transpose_.
at::Tensor & wrapper_CompositeExplicitAutograd__transpose_(at::Tensor & self, int64_t dim0, int64_t dim1) {
    // No device check
  // DeviceGuard omitted
  return at::native::transpose_(self, dim0, dim1);
}
} // anonymous namespace
namespace {
// aten::_mkldnn_transpose.out — thin dispatch wrapper over at::native::_mkldnn_transpose_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__mkldnn_transpose_out(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_mkldnn_transpose_out(self, dim0, dim1, out);
}
} // anonymous namespace
namespace {
// aten::flip.out — thin dispatch wrapper over at::native::flip_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_flip_out(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::flip_out(self, dims, out);
}
} // anonymous namespace
namespace {
// aten::roll.out — thin dispatch wrapper over at::native::roll_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_roll_out(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::roll_out(self, shifts, dims, out);
}
} // anonymous namespace
namespace {
// aten::rot90 — thin dispatch wrapper over at::native::rot90.
at::Tensor wrapper_CompositeExplicitAutograd__rot90(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
    // No device check
  // DeviceGuard omitted
  return at::native::rot90(self, k, dims);
}
} // anonymous namespace
namespace {
// aten::rot90.out — thin dispatch wrapper over at::native::rot90_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_rot90_out(const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::rot90_out(self, k, dims, out);
}
} // anonymous namespace
namespace {
// aten::_transform_bias_rescale_qkv.out — three-output wrapper over the native kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__transform_bias_rescale_qkv_out(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // No device check
  // DeviceGuard omitted
  return at::native::_transform_bias_rescale_qkv_out(qkv, qkv_bias, num_heads, out0, out1, out2);
}
} // anonymous namespace
namespace {
// aten::_nested_tensor_from_mask.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__nested_tensor_from_mask_out(const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_nested_tensor_from_mask_out(t, mask, mask_check, out);
}
} // anonymous namespace
namespace {
// aten::_nested_from_padded.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__nested_from_padded_out(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_nested_from_padded_out(padded, cpu_nested_shape_example, fuse_transform_0213, out);
}
} // anonymous namespace
namespace {
// aten::_nested_tensor_size.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__nested_tensor_size_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_nested_tensor_size_out(self, out);
}
} // anonymous namespace
namespace {
// aten::_nested_tensor_strides.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__nested_tensor_strides_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_nested_tensor_strides_out(self, out);
}
} // anonymous namespace
namespace {
// aten::_nested_from_padded_and_nested_example.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__nested_from_padded_and_nested_example_out(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_nested_from_padded_and_nested_example_out(padded, nt_example, out);
}
} // anonymous namespace
namespace {
// aten::_nested_view_from_buffer_copy.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__nested_view_from_buffer_copy_out(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_nested_view_from_buffer_copy_out(self, nested_size, nested_strides, offsets, out);
}
} // anonymous namespace
namespace {
// aten::_trilinear.out — thin dispatch wrapper over at::native::_trilinear_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__trilinear_out(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_trilinear_out(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
}
} // anonymous namespace
namespace {
// aten::_unique.out — two-output wrapper over at::native::_unique_out.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__unique_out(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::_unique_out(self, sorted, return_inverse, out0, out1);
}
} // anonymous namespace
namespace {
// aten::unique_dim.out — three-output wrapper over at::native::unique_dim_out.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_unique_dim_out(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // No device check
  // DeviceGuard omitted
  return at::native::unique_dim_out(self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
}
} // anonymous namespace
namespace {
// aten::unique_consecutive.out — three-output wrapper over the native kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_unique_consecutive_out(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // No device check
  // DeviceGuard omitted
  return at::native::unique_consecutive_out(self, return_inverse, return_counts, dim, out0, out1, out2);
}
} // anonymous namespace
namespace {
// aten::unique_dim_consecutive.out — three-output wrapper over the native kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_unique_dim_consecutive_out(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // No device check
  // DeviceGuard omitted
  return at::native::unique_dim_consecutive_out(self, dim, return_inverse, return_counts, out0, out1, out2);
}
} // anonymous namespace
namespace {
// aten::_unique2.out — three-output wrapper over at::native::_unique2_out.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__unique2_out(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // No device check
  // DeviceGuard omitted
  return at::native::_unique2_out(self, sorted, return_inverse, return_counts, out0, out1, out2);
}
} // anonymous namespace
namespace {
// aten::_unsafe_view — SymInt sizes materialized via C10_AS_INTARRAYREF_SLOW
// (symbolic shapes are not preserved on this path).
at::Tensor wrapper_CompositeExplicitAutograd___unsafe_view(const at::Tensor & self, c10::SymIntArrayRef size) {
    // No device check
  // DeviceGuard omitted
  return at::native::_unsafe_view(self, C10_AS_INTARRAYREF_SLOW(size));
}
} // anonymous namespace
namespace {
// aten::_unsafe_view.out — forwards to the SymInt-aware out-variant kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__unsafe_view_out(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_unsafe_view_out_symint(self, size, out);
}
} // anonymous namespace
namespace {
// aten::unsqueeze — thin dispatch wrapper over at::native::unsqueeze.
at::Tensor wrapper_CompositeExplicitAutograd__unsqueeze(const at::Tensor & self, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::unsqueeze(self, dim);
}
} // anonymous namespace
namespace {
// aten::unsqueeze_ — in-place variant; forwards to at::native::unsqueeze_.
at::Tensor & wrapper_CompositeExplicitAutograd__unsqueeze_(at::Tensor & self, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::unsqueeze_(self, dim);
}
} // anonymous namespace
namespace {
// aten::var_mean.correction_out — forwards to at::native::var_mean_correction_out.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_correction_out_var_mean_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::var_mean_correction_out(self, dim, correction, keepdim, out0, out1);
}
} // anonymous namespace
namespace {
// aten::_weight_norm_interface.out — two-output wrapper over the native kernel.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__weight_norm_interface_out(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::_weight_norm_interface_out(v, g, dim, out0, out1);
}
} // anonymous namespace
namespace {
// aten::_weight_norm_interface_backward.out — two-output wrapper over the native kernel.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__weight_norm_interface_backward_out(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::_weight_norm_interface_backward_out(grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
}
} // anonymous namespace
namespace {
// aten::zeros.names — named-tensor factory overload; forwards TensorOptions fields individually.
at::Tensor wrapper_CompositeExplicitAutograd_names_zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    // No device check
  // DeviceGuard omitted
  return at::native::zeros(size, names, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// aten::zeros.names_out — thin dispatch wrapper over at::native::zeros_names_out.
at::Tensor & wrapper_CompositeExplicitAutograd_names_out_zeros_out(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::zeros_names_out(size, names, out);
}
} // anonymous namespace
namespace {
// aten::_efficientzerotensor.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__efficientzerotensor_out(at::IntArrayRef size, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_efficientzerotensor_out(size, out);
}
} // anonymous namespace
namespace {
// aten::zeros — forwards to the SymInt-aware factory (symbolic sizes preserved).
at::Tensor wrapper_CompositeExplicitAutograd__zeros(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    // No device check
  // DeviceGuard omitted
  return at::native::zeros_symint(size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// aten::zeros.out — SymInt sizes materialized via C10_AS_INTARRAYREF_SLOW
// (the out-variant kernel takes concrete IntArrayRef).
at::Tensor & wrapper_CompositeExplicitAutograd_out_zeros_out(c10::SymIntArrayRef size, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::zeros_out(C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
namespace {
// aten::zeros_like — thin dispatch wrapper over at::native::zeros_like.
at::Tensor wrapper_CompositeExplicitAutograd__zeros_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::zeros_like(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
// aten::zeros_like.out — thin dispatch wrapper over at::native::zeros_like_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_zeros_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::zeros_like_out(self, memory_format, out);
}
} // anonymous namespace
namespace {
// aten::_standard_gamma_grad.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__standard_gamma_grad_out(const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_standard_gamma_grad_out(self, output, out);
}
} // anonymous namespace
namespace {
// aten::_standard_gamma.out — sampling op; generator is forwarded untouched.
at::Tensor & wrapper_CompositeExplicitAutograd_out__standard_gamma_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_standard_gamma_out(self, generator, out);
}
} // anonymous namespace
namespace {
// aten::_dirichlet_grad.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__dirichlet_grad_out(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_dirichlet_grad_out(x, alpha, total, out);
}
} // anonymous namespace
namespace {
// aten::_sample_dirichlet.out — sampling op; generator is forwarded untouched.
at::Tensor & wrapper_CompositeExplicitAutograd_out__sample_dirichlet_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sample_dirichlet_out(self, generator, out);
}
} // anonymous namespace
namespace {
// aten::poisson.out — sampling op; generator is forwarded untouched.
at::Tensor & wrapper_CompositeExplicitAutograd_out_poisson_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::poisson_out(self, generator, out);
}
} // anonymous namespace
namespace {
// aten::binomial.out — sampling op; generator is forwarded untouched.
at::Tensor & wrapper_CompositeExplicitAutograd_out_binomial_out(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::binomial_out(count, prob, generator, out);
}
} // anonymous namespace
namespace {
// aten::native_norm.out — thin dispatch wrapper over at::native::native_norm_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_native_norm_out(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::native_norm_out(self, p, out);
}
} // anonymous namespace
namespace {
// aten::native_norm.ScalarOpt_dim_dtype_out — dim/dtype overload of native_norm out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_ScalarOpt_dim_dtype_out_native_norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::native_norm_ScalarOpt_dim_dtype_out(self, p, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
// aten::_sparse_sum.dim — thin dispatch wrapper over at::native::_sparse_sum.
at::Tensor wrapper_CompositeExplicitAutograd_dim__sparse_sum(const at::Tensor & self, at::IntArrayRef dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_sum(self, dim);
}
} // anonymous namespace
namespace {
// aten::_sparse_sum.dim_out — thin dispatch wrapper over at::native::_sparse_sum_dim_out.
at::Tensor & wrapper_CompositeExplicitAutograd_dim_out__sparse_sum_out(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_sum_dim_out(self, dim, out);
}
} // anonymous namespace
namespace {
// aten::_sparse_sum_backward.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_sum_backward_out(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_sum_backward_out(grad, self, dim, out);
}
} // anonymous namespace
namespace {
// aten::_sparse_csr_sum.dim_dtype_out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_dim_dtype_out__sparse_csr_sum_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_sum_dim_dtype_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
// aten::_sparse_csr_prod.dim_dtype_out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_dim_dtype_out__sparse_csr_prod_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_prod_dim_dtype_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
// aten::_sparse_softmax.out — thin dispatch wrapper over at::native::_sparse_softmax_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_softmax_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_softmax_out(self, dim, half_to_float, out);
}
} // anonymous namespace
namespace {
// aten::_sparse_softmax_backward_data.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_softmax_backward_data_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_softmax_backward_data_out(grad_output, output, dim, self, out);
}
} // anonymous namespace
namespace {
// aten::_sparse_log_softmax.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_log_softmax_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_log_softmax_out(self, dim, half_to_float, out);
}
} // anonymous namespace
namespace {
// aten::_sparse_log_softmax_backward_data.out — thin dispatch wrapper over the native kernel.
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_log_softmax_backward_data_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_log_softmax_backward_data_out(grad_output, output, dim, self, out);
}
} // anonymous namespace
namespace {
// aten::_spdiags.out — thin dispatch wrapper over at::native::_spdiags_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out__spdiags_out(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_spdiags_out(diagonals, offsets, shape, layout, out);
}
} // anonymous namespace
namespace {
// aten::norm.ScalarOpt_dtype — thin dispatch wrapper over at::native::norm.
at::Tensor wrapper_CompositeExplicitAutograd_ScalarOpt_dtype_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::norm(self, p, dtype);
}
} // anonymous namespace
namespace {
// aten::norm.ScalarOpt_dtype_out — thin dispatch wrapper over the native out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_ScalarOpt_dtype_out_norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::norm_ScalarOpt_dtype_out(self, p, dtype, out);
}
} // anonymous namespace
namespace {
// aten::norm.Scalar — thin dispatch wrapper over at::native::norm.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_norm(const at::Tensor & self, const at::Scalar & p) {
    // No device check
  // DeviceGuard omitted
  return at::native::norm(self, p);
}
} // anonymous namespace
namespace {
// aten::norm.Scalar_out — thin dispatch wrapper over at::native::norm_Scalar_out.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_norm_out(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::norm_Scalar_out(self, p, out);
}
} // anonymous namespace
namespace {
// aten::frexp.Tensor — returns (mantissa, exponent) pair from at::native::frexp.
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd_Tensor_frexp(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::frexp(self);
}
} // anonymous namespace
namespace {
// aten::clone — thin dispatch wrapper over at::native::clone.
at::Tensor wrapper_CompositeExplicitAutograd__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::clone(self, memory_format);
}
} // anonymous namespace
namespace {
// aten::clone.out — thin dispatch wrapper over at::native::clone_out.
at::Tensor & wrapper_CompositeExplicitAutograd_out_clone_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::clone_out(self, memory_format, out);
}
} // anonymous namespace
namespace {
// aten::resize_as — functional variant; forwards to at::native::resize_as.
at::Tensor wrapper_CompositeExplicitAutograd__resize_as(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::resize_as(self, the_template, memory_format);
}
} // anonymous namespace
namespace {
// aten::resize_as.out — note the const-ref out parameter, matching the resize-family schema.
const at::Tensor & wrapper_CompositeExplicitAutograd_out_resize_as_out(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::resize_as_out(self, the_template, memory_format, out);
}
} // anonymous namespace
namespace {
// aten::resize_as_ — in-place variant; const-ref self per the resize-family schema.
const at::Tensor & wrapper_CompositeExplicitAutograd__resize_as_(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::resize_as_(self, the_template, memory_format);
}
} // anonymous namespace
4162 | namespace { |
4163 | at::Tensor wrapper_CompositeExplicitAutograd__resize_as_sparse(const at::Tensor & self, const at::Tensor & the_template) { |
4164 | // No device check |
4165 | // DeviceGuard omitted |
4166 | return at::native::resize_as_sparse(self, the_template); |
4167 | } |
4168 | } // anonymous namespace |
4169 | namespace { |
4170 | const at::Tensor & wrapper_CompositeExplicitAutograd_out_resize_as_sparse_out(const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) { |
4171 | // No device check |
4172 | // DeviceGuard omitted |
4173 | return at::native::resize_as_sparse_out(self, the_template, out); |
4174 | } |
4175 | } // anonymous namespace |
4176 | namespace { |
4177 | at::Tensor wrapper_CompositeExplicitAutograd__zero(const at::Tensor & self) { |
4178 | // No device check |
4179 | // DeviceGuard omitted |
4180 | return at::native::zero(self); |
4181 | } |
4182 | } // anonymous namespace |
4183 | namespace { |
4184 | at::Tensor & wrapper_CompositeExplicitAutograd_out_zero_out(const at::Tensor & self, at::Tensor & out) { |
4185 | // No device check |
4186 | // DeviceGuard omitted |
4187 | return at::native::zero_out(self, out); |
4188 | } |
4189 | } // anonymous namespace |
4190 | namespace { |
4191 | at::Tensor wrapper_CompositeExplicitAutograd_Scalar_sub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { |
4192 | // No device check |
4193 | // DeviceGuard omitted |
4194 | return at::native::sub(self, other, alpha); |
4195 | } |
4196 | } // anonymous namespace |
4197 | namespace { |
4198 | at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_sub_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
4199 | // No device check |
4200 | // DeviceGuard omitted |
4201 | return at::native::sub_Scalar_out(self, other, alpha, out); |
4202 | } |
4203 | } // anonymous namespace |
4204 | namespace { |
4205 | at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_sub_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { |
4206 | // No device check |
4207 | // DeviceGuard omitted |
4208 | return at::native::sub_(self, other, alpha); |
4209 | } |
4210 | } // anonymous namespace |
4211 | namespace { |
4212 | at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_out_rsub_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
4213 | // No device check |
4214 | // DeviceGuard omitted |
4215 | return at::native::rsub_Tensor_out(self, other, alpha, out); |
4216 | } |
4217 | } // anonymous namespace |
4218 | namespace { |
4219 | at::Tensor wrapper_CompositeExplicitAutograd_Scalar_rsub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { |
4220 | // No device check |
4221 | // DeviceGuard omitted |
4222 | return at::native::rsub(self, other, alpha); |
4223 | } |
4224 | } // anonymous namespace |
4225 | namespace { |
4226 | at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_rsub_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
4227 | // No device check |
4228 | // DeviceGuard omitted |
4229 | return at::native::rsub_Scalar_out(self, other, alpha, out); |
4230 | } |
4231 | } // anonymous namespace |
4232 | namespace { |
4233 | at::Tensor wrapper_CompositeExplicitAutograd___sparse_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
4234 | // No device check |
4235 | // DeviceGuard omitted |
4236 | return at::native::_sparse_addmm(self, mat1, mat2, beta, alpha); |
4237 | } |
4238 | } // anonymous namespace |
4239 | namespace { |
4240 | at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
4241 | // No device check |
4242 | // DeviceGuard omitted |
4243 | return at::native::_sparse_addmm_out(self, mat1, mat2, beta, alpha, out); |
4244 | } |
4245 | } // anonymous namespace |
// ---------------------------------------------------------------------------
// NOTE(review): machine-generated ("@generated by torchgen" — see file header)
// wrappers for sparse-tensor construction, resizing, coalescing, component
// accessors, and sparse copies. Each one forwards verbatim to at::native;
// do not edit by hand — regenerate with torchgen/gen.py.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_size_sparse_coo_tensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_coo_tensor(size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_size_out_sparse_coo_tensor_out(at::IntArrayRef size, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_coo_tensor_size_out(size, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_out(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_coo_tensor_with_dims_out(sparse_dim, dense_dim, size, out);
}
} // anonymous namespace
namespace {
// NOTE(review): takes c10::SymIntArrayRef, so this routes to the _symint
// native overload (symbolic-shape aware), unlike its IntArrayRef siblings.
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_and_tensors_out(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_coo_tensor_with_dims_and_tensors_out_symint(sparse_dim, dense_dim, size, indices, values, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__sparse_resize(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_resize(self, size, sparse_dim, dense_dim);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeExplicitAutograd_out_sparse_resize_out(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_resize_out(self, size, sparse_dim, dense_dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__sparse_resize_and_clear(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_resize_and_clear(self, size, sparse_dim, dense_dim);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeExplicitAutograd_out_sparse_resize_and_clear_out(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_resize_and_clear_out(self, size, sparse_dim, dense_dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_sparse_mask_out(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_mask_out(self, mask, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__to_dense_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_to_dense_out(self, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__coalesce_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_coalesce_out(self, out);
}
} // anonymous namespace
namespace {
// NOTE(review): the *_default native variants below are the fallbacks used by
// this composite key (vs. layout-specific kernels registered elsewhere).
bool wrapper_CompositeExplicitAutograd__is_coalesced(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::is_coalesced_default(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___coalesced(const at::Tensor & self, bool coalesced) {
    // No device check
  // DeviceGuard omitted
  return at::native::_coalesced(self, coalesced);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__coalesced_out(const at::Tensor & self, bool coalesced, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_coalesced_out(self, coalesced, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__indices(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::indices_default(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__values(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::values_default(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__crow_indices(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::crow_indices_default(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__col_indices(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::col_indices_default(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__ccol_indices(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::ccol_indices_default(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__row_indices(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::row_indices_default(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__copy_sparse_to_sparse(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    // No device check
  // DeviceGuard omitted
  return at::native::copy_sparse_to_sparse(self, src, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_copy_sparse_to_sparse_out(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::copy_sparse_to_sparse_out(self, src, non_blocking, out);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// NOTE(review): machine-generated ("@generated by torchgen" — see file header)
// wrappers for unbind and the layout-conversion out= overloads
// (to_sparse / to_sparse_csr / csc / bsr / bsc / to_mkldnn and the MKLDNN
// weight-reorder helpers). Pure forwarding; regenerate rather than edit.
// ---------------------------------------------------------------------------
namespace {
::std::vector<at::Tensor> wrapper_CompositeExplicitAutograd_int_unbind(const at::Tensor & self, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::unbind(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_sparse_dim_out_to_sparse_out(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_sparse_sparse_dim_out(self, sparse_dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_to_sparse_out(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_sparse_out(self, layout, blocksize, dense_dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_to_sparse_csr_out(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_sparse_csr_out(self, dense_dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_to_sparse_csc_out(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_sparse_csc_out(self, dense_dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_to_sparse_bsr_out(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_sparse_bsr_out(self, blocksize, dense_dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_to_sparse_bsc_out(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_sparse_bsc_out(self, blocksize, dense_dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_to_mkldnn_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_mkldnn_out(self, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_reorder_conv2d_weight_out(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_reorder_conv2d_weight_out(self, padding, stride, dilation, groups, input_size, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_reorder_conv3d_weight_out(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_reorder_conv3d_weight_out(self, padding, stride, dilation, groups, out);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// NOTE(review): machine-generated ("@generated by torchgen" — see file header)
// wrappers for quantize/dequantize out= overloads and quantized-tensor
// helpers. Pure forwarding; regenerate rather than edit.
// ---------------------------------------------------------------------------
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_quantize_per_tensor_dynamic_out(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_dynamic_out(self, dtype, reduce_range, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_quantize_per_tensor_out(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_out(self, scale, zero_point, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_tensor_qparams_out_quantize_per_tensor_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_tensor_qparams_out(self, scale, zero_point, dtype, out);
}
} // anonymous namespace
namespace {
// NOTE(review): void return — the TensorList `out` is the output; the
// `return` of a void call is the generator's uniform forwarding pattern.
void wrapper_CompositeExplicitAutograd_tensors_out_quantize_per_tensor_out(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_tensors_out(tensors, scales, zero_points, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_quantize_per_channel_out(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_channel_out(self, scales, zero_points, axis, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_self_out_dequantize_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::dequantize_self_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_tensors_out_dequantize_out(at::TensorList tensors, at::TensorList out) {
    // No device check
  // DeviceGuard omitted
  return at::native::dequantize_tensors_out(tensors, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_q_per_channel_scales_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::q_per_channel_scales_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_q_per_channel_zero_points_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::q_per_channel_zero_points_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_int_repr_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::int_repr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__make_per_tensor_quantized_tensor_out(const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_make_per_tensor_quantized_tensor_out(self, scale, zero_point, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__make_per_channel_quantized_tensor_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_make_per_channel_quantized_tensor_out(self, scale, zero_point, axis, out);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// NOTE(review): machine-generated ("@generated by torchgen" — see file header)
// wrappers for fake-quantization cachemask/learnable variants and the fused
// moving-average observer helpers. Multi-output overloads return a tuple of
// the out0/out1/... references. Pure forwarding; regenerate rather than edit.
// ---------------------------------------------------------------------------
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_fake_quantize_per_tensor_affine_cachemask_out(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine_cachemask_out(self, scale, zero_point, quant_min, quant_max, out0, out1);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__fake_quantize_learnable_per_tensor_affine_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_tensor_affine_out(self, scale, zero_point, quant_min, quant_max, grad_factor, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out_fake_quantize_per_channel_affine_cachemask_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_channel_affine_cachemask_out(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__fake_quantize_learnable_per_channel_affine_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_channel_affine_out(self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
}
} // anonymous namespace
namespace {
// NOTE(review): the "_functional" variant returns all results by value
// (no in-place mutation of the running min/max/scale/zero_point arguments).
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd___fused_moving_avg_obs_fq_helper_functional(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    // No device check
  // DeviceGuard omitted
  return at::native::_fused_moving_avg_obs_fq_helper_functional(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__fused_moving_avg_obs_fq_helper_out(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::_fused_moving_avg_obs_fq_helper_out(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// NOTE(review): machine-generated ("@generated by torchgen" — see file header)
// wrappers for _to_copy and the RNN-cell / packed-sequence out= overloads.
// Pure forwarding; regenerate rather than edit.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::_to_copy(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__to_copy_out(const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_to_copy_out(self, non_blocking, memory_format, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__lstm_mps_out(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // No device check
  // DeviceGuard omitted
  return at::native::_lstm_mps_out(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out_lstm_mps_backward_out(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
    // No device check
  // DeviceGuard omitted
  return at::native::lstm_mps_backward_out(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__thnn_fused_lstm_cell_out(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // No device check
  // DeviceGuard omitted
  return at::native::_thnn_fused_lstm_cell_out(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__thnn_fused_lstm_cell_backward_impl_out(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // No device check
  // DeviceGuard omitted
  return at::native::_thnn_fused_lstm_cell_backward_impl_out(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__thnn_fused_gru_cell_out(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::_thnn_fused_gru_cell_out(input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__thnn_fused_gru_cell_backward_out(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // No device check
  // DeviceGuard omitted
  return at::native::_thnn_fused_gru_cell_backward_out(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd___pack_padded_sequence(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
    // No device check
  // DeviceGuard omitted
  return at::native::_pack_padded_sequence(input, lengths, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__pack_padded_sequence_out(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {
    // No device check
  // DeviceGuard omitted
  return at::native::_pack_padded_sequence_out(input, lengths, batch_first, out0, out1);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels for the aten::set overload
// family. Each wrapper only forwards to at::native; no device checks or
// guards are emitted for this dispatch key.
namespace {
// aten::set.source_Storage — functional variant taking a Storage source.
at::Tensor wrapper_CompositeExplicitAutograd_source_Storage_set(const at::Tensor & self, at::Storage source) {
// No device check
// DeviceGuard omitted
return at::native::set(self, source);
}
} // anonymous namespace
namespace {
// aten::set.source_Storage_out — out variant of the above.
at::Tensor & wrapper_CompositeExplicitAutograd_source_Storage_out_set_out(const at::Tensor & self, at::Storage source, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::set_source_Storage_out(self, source, out);
}
} // anonymous namespace
namespace {
// aten::set.source_Storage_storage_offset — SymInt-aware variant with
// explicit offset/size/stride; forwards to the *_symint native kernel.
at::Tensor wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_set(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
// No device check
// DeviceGuard omitted
return at::native::set_symint(self, source, storage_offset, size, stride);
}
} // anonymous namespace
namespace {
// aten::set.source_Storage_storage_offset_out — out variant of the above.
at::Tensor & wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_out_set_out(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::set_source_Storage_storage_offset_out_symint(self, source, storage_offset, size, stride, out);
}
} // anonymous namespace
namespace {
// aten::set.source_Tensor — functional variant taking a Tensor source.
at::Tensor wrapper_CompositeExplicitAutograd_source_Tensor_set(const at::Tensor & self, const at::Tensor & source) {
// No device check
// DeviceGuard omitted
return at::native::set(self, source);
}
} // anonymous namespace
namespace {
// aten::set.source_Tensor_out — out variant of the above.
at::Tensor & wrapper_CompositeExplicitAutograd_source_Tensor_out_set_out(const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::set_source_Tensor_out(self, source, out);
}
} // anonymous namespace
namespace {
// aten::set — no-source functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__set(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::set(self);
}
} // anonymous namespace
namespace {
// aten::set.out — out variant of the no-source overload.
at::Tensor & wrapper_CompositeExplicitAutograd_out_set_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::set_out(self, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels for aten::lift / lift_fresh.
namespace {
// aten::lift — forwards to the native functional implementation.
at::Tensor wrapper_CompositeExplicitAutograd__lift(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::lift(self);
}
} // anonymous namespace
namespace {
// aten::lift.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_lift_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::lift_out(self, out);
}
} // anonymous namespace
namespace {
// aten::lift_fresh — forwards to the native functional implementation.
at::Tensor wrapper_CompositeExplicitAutograd__lift_fresh(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::lift_fresh(self);
}
} // anonymous namespace
namespace {
// aten::lift_fresh_copy.out — out variant of the copying form.
at::Tensor & wrapper_CompositeExplicitAutograd_out_lift_fresh_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::lift_fresh_copy_out(self, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels for masked_fill /
// masked_scatter / _masked_softmax; each simply forwards to at::native.
namespace {
// aten::masked_fill.Scalar — fill masked positions with a Scalar value.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
// No device check
// DeviceGuard omitted
return at::native::masked_fill(self, mask, value);
}
} // anonymous namespace
namespace {
// aten::masked_fill.Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_masked_fill_out(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::masked_fill_Scalar_out(self, mask, value, out);
}
} // anonymous namespace
namespace {
// aten::masked_fill.Tensor — fill masked positions with a Tensor value.
at::Tensor wrapper_CompositeExplicitAutograd_Tensor_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
// No device check
// DeviceGuard omitted
return at::native::masked_fill(self, mask, value);
}
} // anonymous namespace
namespace {
// aten::masked_fill.Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_out_masked_fill_out(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::masked_fill_Tensor_out(self, mask, value, out);
}
} // anonymous namespace
namespace {
// aten::masked_scatter — functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
// No device check
// DeviceGuard omitted
return at::native::masked_scatter(self, mask, source);
}
} // anonymous namespace
namespace {
// aten::masked_scatter.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_masked_scatter_out(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::masked_scatter_out(self, mask, source, out);
}
} // anonymous namespace
namespace {
// aten::_masked_softmax.out — out variant of the fused masked softmax.
at::Tensor & wrapper_CompositeExplicitAutograd_out__masked_softmax_out(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_masked_softmax_out(self, mask, dim, mask_type, out);
}
} // anonymous namespace
namespace {
// aten::_masked_softmax_backward.out — backward pass, out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out__masked_softmax_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_masked_softmax_backward_out(grad_output, output, mask, dim, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels for view.dtype, put and the
// index_fill overload family.
namespace {
// aten::view.dtype — reinterpret self's data as another ScalarType.
at::Tensor wrapper_CompositeExplicitAutograd_dtype_view(const at::Tensor & self, at::ScalarType dtype) {
// No device check
// DeviceGuard omitted
return at::native::view_dtype(self, dtype);
}
} // anonymous namespace
namespace {
// aten::put — functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__put(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
// No device check
// DeviceGuard omitted
return at::native::put(self, index, source, accumulate);
}
} // anonymous namespace
namespace {
// aten::put.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_put_out(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::put_out(self, index, source, accumulate, out);
}
} // anonymous namespace
namespace {
// aten::index_fill.int_Scalar — Scalar fill value.
at::Tensor wrapper_CompositeExplicitAutograd_int_Scalar_index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
// No device check
// DeviceGuard omitted
return at::native::index_fill(self, dim, index, value);
}
} // anonymous namespace
namespace {
// aten::index_fill.int_Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_int_Scalar_out_index_fill_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::index_fill_int_Scalar_out(self, dim, index, value, out);
}
} // anonymous namespace
namespace {
// aten::index_fill.int_Tensor — Tensor fill value.
at::Tensor wrapper_CompositeExplicitAutograd_int_Tensor_index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
// No device check
// DeviceGuard omitted
return at::native::index_fill(self, dim, index, value);
}
} // anonymous namespace
namespace {
// aten::index_fill.int_Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_int_Tensor_out_index_fill_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::index_fill_int_Tensor_out(self, dim, index, value, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels for the bitwise logical
// (and/or/xor) and shift (__lshift__/__rshift__, bitwise_left_shift/
// bitwise_right_shift) overload families. Each wrapper forwards directly
// to the corresponding at::native implementation.
namespace {
// aten::bitwise_and.Scalar — Tensor op Scalar, functional.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_bitwise_and(const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_and(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_and.Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_and_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_and_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_and.Scalar_Tensor — Scalar op Tensor, functional.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_and(const at::Scalar & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_and(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_and.Scalar_Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_and_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_and_Scalar_Tensor_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_or.Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_or_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_or_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_or.Scalar_Tensor — Scalar op Tensor, functional.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_or(const at::Scalar & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_or(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_or.Scalar_Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_or_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_or_Scalar_Tensor_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_xor.Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_xor_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_xor_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_xor.Scalar_Tensor — Scalar op Tensor, functional.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_xor(const at::Scalar & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_xor(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_xor.Scalar_Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_xor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_xor_Scalar_Tensor_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::__lshift__.Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out___lshift___out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::__lshift___Scalar_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::__lshift__.Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_out___lshift___out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::__lshift___Tensor_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_left_shift.Tensor_Scalar — functional.
at::Tensor wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_left_shift(const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_left_shift(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_left_shift.Tensor_Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_Scalar_out_bitwise_left_shift_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_left_shift_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_left_shift_.Tensor_Scalar — in-place variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_left_shift_(at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_left_shift_(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_left_shift.Scalar_Tensor — Scalar op Tensor, functional.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_left_shift(const at::Scalar & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_left_shift(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_left_shift.Scalar_Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_left_shift_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_left_shift_Scalar_Tensor_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::__rshift__.Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out___rshift___out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::__rshift___Scalar_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::__rshift__.Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_out___rshift___out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::__rshift___Tensor_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_right_shift.Tensor_Scalar — functional.
at::Tensor wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_right_shift(const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_right_shift(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_right_shift.Tensor_Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_Scalar_out_bitwise_right_shift_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_right_shift_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::bitwise_right_shift_.Tensor_Scalar — in-place variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_right_shift_(at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_right_shift_(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_right_shift.Scalar_Tensor — Scalar op Tensor, functional.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_right_shift(const at::Scalar & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_right_shift(self, other);
}
} // anonymous namespace
namespace {
// aten::bitwise_right_shift.Scalar_Tensor_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_right_shift_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::bitwise_right_shift_Scalar_Tensor_out(self, other, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels for the out-of-place /
// out-variant forms of the in-place RNG ops (random_, uniform_, cauchy_,
// log_normal_, exponential_, geometric_). All take an optional Generator
// and forward unchanged to at::native.
namespace {
// aten::random.from — functional variant with [from, to) range.
at::Tensor wrapper_CompositeExplicitAutograd_from_random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::random(self, from, to, generator);
}
} // anonymous namespace
namespace {
// aten::random.from_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_from_out_random_out(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::random_from_out(self, from, to, generator, out);
}
} // anonymous namespace
namespace {
// aten::random.to — functional variant with upper bound only.
at::Tensor wrapper_CompositeExplicitAutograd_to_random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::random(self, to, generator);
}
} // anonymous namespace
namespace {
// aten::random.to_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_to_out_random_out(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::random_to_out(self, to, generator, out);
}
} // anonymous namespace
namespace {
// aten::random — unbounded functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__random(const at::Tensor & self, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::random(self, generator);
}
} // anonymous namespace
namespace {
// aten::random.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_random_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::random_out(self, generator, out);
}
} // anonymous namespace
namespace {
// aten::uniform — functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__uniform(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::uniform(self, from, to, generator);
}
} // anonymous namespace
namespace {
// aten::uniform.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_uniform_out(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::uniform_out(self, from, to, generator, out);
}
} // anonymous namespace
namespace {
// aten::cauchy — functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__cauchy(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::cauchy(self, median, sigma, generator);
}
} // anonymous namespace
namespace {
// aten::cauchy.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cauchy_out(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::cauchy_out(self, median, sigma, generator, out);
}
} // anonymous namespace
namespace {
// aten::log_normal — functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__log_normal(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::log_normal(self, mean, std, generator);
}
} // anonymous namespace
namespace {
// aten::log_normal.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_log_normal_out(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::log_normal_out(self, mean, std, generator, out);
}
} // anonymous namespace
namespace {
// aten::exponential — functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__exponential(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::exponential(self, lambd, generator);
}
} // anonymous namespace
namespace {
// aten::exponential.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_exponential_out(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::exponential_out(self, lambd, generator, out);
}
} // anonymous namespace
namespace {
// aten::geometric — functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__geometric(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::geometric(self, p, generator);
}
} // anonymous namespace
namespace {
// aten::geometric.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_geometric_out(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::geometric_out(self, p, generator, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels: tril/triu index factories,
// trace, linalg error checking and cholesky_solve.
namespace {
// aten::tril_indices.out — out variant of the factory.
at::Tensor & wrapper_CompositeExplicitAutograd_out_tril_indices_out(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::tril_indices_out(row, col, offset, out);
}
} // anonymous namespace
namespace {
// aten::triu_indices.out — out variant of the factory.
at::Tensor & wrapper_CompositeExplicitAutograd_out_triu_indices_out(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::triu_indices_out(row, col, offset, out);
}
} // anonymous namespace
namespace {
// aten::trace.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_trace_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::trace_out(self, out);
}
} // anonymous namespace
namespace {
// aten::_linalg_check_errors — validates LAPACK-style info codes; returns void.
void wrapper_CompositeExplicitAutograd___linalg_check_errors(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
// No device check
// DeviceGuard omitted
return at::native::_linalg_check_errors(info, api_name, is_matrix);
}
} // anonymous namespace
namespace {
// aten::cholesky_solve — functional variant.
at::Tensor wrapper_CompositeExplicitAutograd__cholesky_solve(const at::Tensor & self, const at::Tensor & input2, bool upper) {
// No device check
// DeviceGuard omitted
return at::native::cholesky_solve(self, input2, upper);
}
} // anonymous namespace
namespace {
// aten::cholesky_solve.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_cholesky_solve_out(const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::cholesky_solve_out(self, input2, upper, out);
}
} // anonymous namespace
namespace {
// aten::_cholesky_solve_helper.out — out variant of the internal helper.
at::Tensor & wrapper_CompositeExplicitAutograd_out__cholesky_solve_helper_out(const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_cholesky_solve_helper_out(self, A, upper, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels: polygamma_, dist and the
// internal histogramdd helpers.
namespace {
// aten::polygamma_ — in-place n-th derivative of digamma.
at::Tensor & wrapper_CompositeExplicitAutograd__polygamma_(at::Tensor & self, int64_t n) {
// No device check
// DeviceGuard omitted
return at::native::polygamma_(self, n);
}
} // anonymous namespace
namespace {
// aten::dist — p-norm of (self - other), functional.
at::Tensor wrapper_CompositeExplicitAutograd__dist(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
// No device check
// DeviceGuard omitted
return at::native::dist(self, other, p);
}
} // anonymous namespace
namespace {
// aten::dist.out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_dist_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::dist_out(self, other, p, out);
}
} // anonymous namespace
namespace {
// aten::_histogramdd_bin_edges.out — fills the provided TensorList of bin edges.
void wrapper_CompositeExplicitAutograd_out__histogramdd_bin_edges_out(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {
// No device check
// DeviceGuard omitted
return at::native::_histogramdd_bin_edges_out(self, bins, range, weight, density, out);
}
} // anonymous namespace
namespace {
// aten::_histogramdd_from_bin_cts.out — out variant (bin counts given).
at::Tensor & wrapper_CompositeExplicitAutograd_out__histogramdd_from_bin_cts_out(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_histogramdd_from_bin_cts_out(self, bins, range, weight, density, out);
}
} // anonymous namespace
namespace {
// aten::_histogramdd_from_bin_tensors.out — out variant (bin edge tensors given).
at::Tensor & wrapper_CompositeExplicitAutograd_out__histogramdd_from_bin_tensors_out(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_histogramdd_from_bin_tensors_out(self, bins, weight, density, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels for the fmod / remainder
// Scalar overload families.
namespace {
// aten::fmod.Scalar — functional.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_fmod(const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::fmod(self, other);
}
} // anonymous namespace
namespace {
// aten::fmod.Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_fmod_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::fmod_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::fmod_.Scalar — in-place variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_fmod_(at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::fmod_(self, other);
}
} // anonymous namespace
namespace {
// aten::remainder.Scalar — functional.
at::Tensor wrapper_CompositeExplicitAutograd_Scalar_remainder(const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::remainder(self, other);
}
} // anonymous namespace
namespace {
// aten::remainder.Scalar_out — out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_remainder_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::remainder_out(self, other, out);
}
} // anonymous namespace
namespace {
// aten::remainder_.Scalar — in-place variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_remainder_(at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::remainder_(self, other);
}
} // anonymous namespace
namespace {
// aten::remainder.Scalar_Tensor_out — Scalar op Tensor, out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_remainder_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::remainder_Scalar_Tensor_out(self, other, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels: sort, argsort.stable_out
// and unfold_backward.out.
namespace {
// aten::sort — returns (values, indices).
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__sort(const at::Tensor & self, int64_t dim, bool descending) {
// No device check
// DeviceGuard omitted
return at::native::sort(self, dim, descending);
}
} // anonymous namespace
namespace {
// aten::sort.values — out variant writing into (values, indices).
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_values_sort_out(const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
// No device check
// DeviceGuard omitted
return at::native::sort_out(self, dim, descending, values, indices);
}
} // anonymous namespace
namespace {
// aten::argsort.stable_out — out variant with stable-sort flag.
at::Tensor & wrapper_CompositeExplicitAutograd_stable_out_argsort_out(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::argsort_stable_out(self, stable, dim, descending, out);
}
} // anonymous namespace
namespace {
// aten::unfold_backward.out — SymInt-aware out variant.
at::Tensor & wrapper_CompositeExplicitAutograd_out_unfold_backward_out(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::unfold_backward_out_symint(grad_in, input_sizes, dim, size, step, out);
}
} // anonymous namespace
// Generated CompositeExplicitAutograd kernels for the normal-distribution
// overload family. The SymInt size is materialized with
// C10_AS_INTARRAYREF_SLOW before calling the IntArrayRef native kernels.
namespace {
// aten::normal_functional — functional counterpart of normal_.
at::Tensor wrapper_CompositeExplicitAutograd__normal_functional(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::normal_functional(self, mean, std, generator);
}
} // anonymous namespace
namespace {
// aten::normal.out — out variant sampling per-element with scalar mean/std.
at::Tensor & wrapper_CompositeExplicitAutograd_out_normal_out(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::normal_out(self, mean, std, generator, out);
}
} // anonymous namespace
namespace {
// aten::normal.float_float — factory variant with explicit size and TensorOptions fields.
at::Tensor wrapper_CompositeExplicitAutograd_float_float_normal(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::normal(mean, std, C10_AS_INTARRAYREF_SLOW(size), generator, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
// aten::normal.float_float_out — out variant of the factory.
at::Tensor & wrapper_CompositeExplicitAutograd_float_float_out_normal_out(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::normal_out(mean, std, C10_AS_INTARRAYREF_SLOW(size), generator, out);
}
} // anonymous namespace
5338 | namespace { |
5339 | at::Tensor wrapper_CompositeExplicitAutograd__alias(const at::Tensor & self) { |
5340 | // No device check |
5341 | // DeviceGuard omitted |
5342 | return at::native::alias(self); |
5343 | } |
5344 | } // anonymous namespace |
// Automatic mixed precision (AMP) gradient-scaling shims: forward the
// functional and out variants of the _amp_* operators to at::native.
namespace {
// Functional variant: returns the updated tensor list plus found_inf.
::std::tuple<::std::vector<at::Tensor>,at::Tensor> wrapper_CompositeExplicitAutograd___amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
  // No device check
  // DeviceGuard omitted
  return at::native::_amp_foreach_non_finite_check_and_unscale(self, found_inf, inv_scale);
}
} // anonymous namespace
namespace {
// Out variant: results are written into `out`; note `found_inf` is mutable here.
void wrapper_CompositeExplicitAutograd_out__amp_foreach_non_finite_check_and_unscale_out(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_amp_foreach_non_finite_check_and_unscale_out(self, found_inf, inv_scale, out);
}
} // anonymous namespace
namespace {
// Functional variant of _amp_update_scale: returns (new scale, new growth tracker).
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd___amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
  // No device check
  // DeviceGuard omitted
  return at::native::_amp_update_scale(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}
} // anonymous namespace
namespace {
// Out variant of _amp_update_scale; `growth_tracker` is updated in place.
at::Tensor & wrapper_CompositeExplicitAutograd_out__amp_update_scale_out(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_amp_update_scale_out(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
}
} // anonymous namespace
// _foreach_* binary ops, tensor-list (op) scalar, out variants.  Each shim
// forwards to the corresponding at::native::_foreach_<op>_Scalar_out kernel,
// writing per-tensor results into the parallel `out` list.
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_add_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_add_Scalar_out(self, scalar, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_sub_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_sub_Scalar_out(self, scalar, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_mul_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_mul_Scalar_out(self, scalar, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_div_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_div_Scalar_out(self, scalar, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_clamp_min_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_clamp_min_Scalar_out(self, scalar, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_clamp_max_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_clamp_max_Scalar_out(self, scalar, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_maximum_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_maximum_Scalar_out(self, scalar, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_minimum_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_minimum_Scalar_out(self, scalar, out);
}
} // anonymous namespace
// _foreach_* binary ops, tensor-list (op) tensor-list, out variants.  add/sub
// additionally take an `alpha` scaling factor for the second operand; each
// shim forwards to at::native::_foreach_<op>_List_out.
namespace {
void wrapper_CompositeExplicitAutograd_List_out__foreach_add_out(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_add_List_out(self, other, alpha, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_List_out__foreach_sub_out(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_sub_List_out(self, other, alpha, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_List_out__foreach_mul_out(at::TensorList self, at::TensorList other, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_mul_List_out(self, other, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_List_out__foreach_div_out(at::TensorList self, at::TensorList other, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_div_List_out(self, other, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_List_out__foreach_clamp_min_out(at::TensorList self, at::TensorList other, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_clamp_min_List_out(self, other, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_List_out__foreach_clamp_max_out(at::TensorList self, at::TensorList other, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_clamp_max_List_out(self, other, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_List_out__foreach_maximum_out(at::TensorList self, at::TensorList other, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_maximum_List_out(self, other, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_List_out__foreach_minimum_out(at::TensorList self, at::TensorList other, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_minimum_List_out(self, other, out);
}
} // anonymous namespace
// _foreach_* binary ops, tensor-list (op) scalar-list, out variants: one
// scalar per tensor in the list.  Each shim forwards to
// at::native::_foreach_<op>_ScalarList_out.
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_add_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_add_ScalarList_out(self, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_sub_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_sub_ScalarList_out(self, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_div_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_div_ScalarList_out(self, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_mul_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_mul_ScalarList_out(self, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_clamp_min_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_clamp_min_ScalarList_out(self, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_clamp_max_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_clamp_max_ScalarList_out(self, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_maximum_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_maximum_ScalarList_out(self, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_minimum_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_minimum_ScalarList_out(self, scalars, out);
}
} // anonymous namespace
// _foreach_* unary ops, out variants (exp, zero, sqrt, abs, trig, log family,
// rounding, etc.): apply one elementwise op to each tensor in `self`, writing
// results into the parallel `out` list.  Each shim forwards unchanged to the
// same-named at::native kernel.  _foreach_zero also has a functional variant
// that returns freshly-zeroed tensors.
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_exp_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_exp_out(self, out);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeExplicitAutograd___foreach_zero(at::TensorList self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_zero(self);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_zero_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_zero_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_sqrt_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_sqrt_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_abs_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_abs_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_acos_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_acos_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_asin_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_asin_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_atan_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_atan_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_ceil_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_ceil_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_cos_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_cos_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_cosh_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_cosh_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_erf_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_erf_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_erfc_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_erfc_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_expm1_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_expm1_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_floor_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_floor_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_log_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_log_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_log10_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_log10_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_log1p_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_log1p_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_log2_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_log2_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_neg_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_neg_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_tan_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_tan_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_tanh_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_tanh_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_sin_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_sin_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_sinh_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_sinh_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_round_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_round_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_lgamma_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_lgamma_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_frac_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_frac_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_reciprocal_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_reciprocal_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_sigmoid_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_sigmoid_out(self, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out__foreach_trunc_out(at::TensorList self, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_trunc_out(self, out);
}
} // anonymous namespace
// _foreach pointwise ops (addcdiv/addcmul with Scalar, ScalarList, or Tensor
// scalars), per-tensor norm, and lerp (tensor-list or scalar weight), out
// variants.  Each shim forwards to the same-named at::native kernel.
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_addcdiv_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_addcdiv_Scalar_out(self, tensor1, tensor2, value, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_addcmul_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_addcmul_Scalar_out(self, tensor1, tensor2, value, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_addcdiv_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_addcdiv_ScalarList_out(self, tensor1, tensor2, scalars, out);
}
} // anonymous namespace
namespace {
// Tensor overload: `scalars` is a tensor of per-element scaling values.
void wrapper_CompositeExplicitAutograd_Tensor_out__foreach_addcdiv_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_addcdiv_Tensor_out(self, tensor1, tensor2, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_addcmul_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_addcmul_ScalarList_out(self, tensor1, tensor2, scalars, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Tensor_out__foreach_addcmul_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_addcmul_Tensor_out(self, tensor1, tensor2, scalars, out);
}
} // anonymous namespace
namespace {
// Per-tensor `ord`-norm of each tensor in `self`.
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_norm_out(at::TensorList self, const at::Scalar & ord, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_norm_Scalar_out(self, ord, out);
}
} // anonymous namespace
namespace {
// lerp with per-tensor weight tensors.
void wrapper_CompositeExplicitAutograd_List_out__foreach_lerp_out(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_lerp_List_out(self, tensors1, weights, out);
}
} // anonymous namespace
namespace {
// lerp with a single scalar weight shared across the list.
void wrapper_CompositeExplicitAutograd_Scalar_out__foreach_lerp_out(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_foreach_lerp_Scalar_out(self, tensors1, weight, out);
}
} // anonymous namespace
// Scalar-input overloads of bucketize and searchsorted, out variants.
namespace {
// bucketize.Scalar_out: bin a single scalar against sorted `boundaries`.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_bucketize_out(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::bucketize_Scalar_out(self, boundaries, out_int32, right, out);
}
} // anonymous namespace
namespace {
// searchsorted.Scalar_out: locate a single scalar within `sorted_sequence`;
// `side`/`right` choose the insertion side, optional `sorter` gives indices
// that sort the sequence.
at::Tensor & wrapper_CompositeExplicitAutograd_Scalar_out_searchsorted_out(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::searchsorted_Scalar_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
} // anonymous namespace
// Loss-function shims: smooth_l1 / huber backward, and soft_margin forward,
// backward, and their out variants.  `reduction` is the usual at::Reduction
// enum value passed through as int64_t.
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
  // No device check
  // DeviceGuard omitted
  return at::native::smooth_l1_loss_backward(grad_output, self, target, reduction, beta);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
  // No device check
  // DeviceGuard omitted
  return at::native::huber_loss_backward(grad_output, self, target, reduction, delta);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__soft_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::soft_margin_loss(self, target, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_soft_margin_loss_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::soft_margin_loss_out(self, target, reduction, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::soft_margin_loss_backward(grad_output, self, target, reduction);
}
} // anonymous namespace
namespace {
// grad_input out-variant: the result tensor is conventionally named grad_input.
at::Tensor & wrapper_CompositeExplicitAutograd_grad_input_soft_margin_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
  // No device check
  // DeviceGuard omitted
  return at::native::soft_margin_loss_backward_out(grad_output, self, target, reduction, grad_input);
}
} // anonymous namespace
// Activation JVP/backward shims: glu forward-mode derivatives, hardswish and
// rrelu_with_noise backward, and the mkldnn adaptive_avg_pool2d backward out.
namespace {
// Forward-mode (JVP) derivative of glu; `dx` is the input tangent.
at::Tensor & wrapper_CompositeExplicitAutograd_out_glu_jvp_out(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::glu_jvp_out(glu, x, dx, dim, out);
}
} // anonymous namespace
namespace {
// JVP of glu's backward pass (second-order term used by forward-over-reverse).
at::Tensor & wrapper_CompositeExplicitAutograd_out_glu_backward_jvp_out(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::glu_backward_jvp_out(grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_hardswish_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::hardswish_backward_out(grad_output, self, out);
}
} // anonymous namespace
namespace {
// `self_is_result` indicates the forward was run in-place (training mode),
// changing how the gradient is reconstructed from `noise`.
at::Tensor wrapper_CompositeExplicitAutograd__rrelu_with_noise_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
  // No device check
  // DeviceGuard omitted
  return at::native::rrelu_with_noise_backward(grad_output, self, noise, lower, upper, training, self_is_result);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_rrelu_with_noise_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::rrelu_with_noise_backward_out(grad_output, self, noise, lower, upper, training, self_is_result, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_mkldnn_adaptive_avg_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_adaptive_avg_pool2d_backward_out(grad_output, self, out);
}
} // anonymous namespace
// Pooling and convolution shims: adaptive average pooling 2d/3d (forward and
// backward out variants) and the slow/depthwise convolution out variants.
// Operators whose schema carries SymInt sizes forward to *_symint kernels.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_adaptive_avg_pool2d_out_symint(self, output_size, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_adaptive_avg_pool2d_backward_out(grad_output, self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_adaptive_avg_pool3d_out_symint(self, output_size, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_adaptive_avg_pool3d_backward_out(grad_output, self, out);
}
} // anonymous namespace
namespace {
// _slow_conv2d_backward.output_mask_out: `output_mask` selects which of the
// three gradients (input, weight, bias) to compute into out0/out1/out2.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_output_mask_out__slow_conv2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  // No device check
  // DeviceGuard omitted
  return at::native::_slow_conv2d_backward_output_mask_out(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_conv_depthwise3d_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_depthwise3d_out_symint(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_slow_conv_dilated2d_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv_dilated2d_out_symint(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_slow_conv_dilated3d_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv_dilated3d_out_symint(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
} // anonymous namespace
// NOTE(review): generated wrappers for isinf and the Scalar-overloads of
// special_xlog1py / special_zeta. The "self_scalar" / "other_scalar"
// overload names indicate which operand is an at::Scalar rather than a
// Tensor; the wrappers forward straight to the matching at::native overload.
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__isinf(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::isinf(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_isinf_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::isinf_out(self, out);
}
} // anonymous namespace
namespace {
// xlog1py with a Scalar first ("self") operand.
at::Tensor wrapper_CompositeExplicitAutograd_self_scalar_special_xlog1py(const at::Scalar & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::special_xlog1py(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_self_scalar_out_special_xlog1py_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_xlog1py_out(self, other, out);
}
} // anonymous namespace
namespace {
// xlog1py with a Scalar second ("other") operand.
at::Tensor wrapper_CompositeExplicitAutograd_other_scalar_special_xlog1py(const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::special_xlog1py(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_other_scalar_out_special_xlog1py_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_xlog1py_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_self_scalar_special_zeta(const at::Scalar & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::special_zeta(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_self_scalar_out_special_zeta_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_zeta_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_other_scalar_special_zeta(const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::special_zeta(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_other_scalar_out_special_zeta_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_zeta_out(self, other, out);
}
} // anonymous namespace
// NOTE(review): generated wrappers for FFT frequency-grid factories and
// linalg_lstsq. The factory functions take the usual TensorOptions fields
// (dtype/layout/device/pin_memory) as separate optionals; the out-variants
// drop them because the output tensor already fixes those properties.
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__fft_fftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::fft_fftfreq(n, d, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_fft_fftfreq_out(int64_t n, double d, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::fft_fftfreq_out(n, d, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd__fft_rfftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::fft_rfftfreq(n, d, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_fft_rfftfreq_out(int64_t n, double d, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::fft_rfftfreq_out(n, d, out);
}
} // anonymous namespace
namespace {
// Least-squares solve; returns (solution, residuals, rank, singular_values).
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutograd__linalg_lstsq(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {
// No device check
// DeviceGuard omitted
return at::native::linalg_lstsq(self, b, rcond, driver);
}
} // anonymous namespace
// NOTE(review): generated wrappers for linalg out-variants plus the
// "_test_*" operators, which exist purely to exercise codegen/dispatcher
// machinery (optional int/float lists, autograd warnings) in PyTorch tests.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_linalg_matrix_exp_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::linalg_matrix_exp_out(self, out);
}
} // anonymous namespace
namespace {
// pinv overload where atol/rtol tolerances are optional Tensors.
at::Tensor & wrapper_CompositeExplicitAutograd_atol_rtol_tensor_out_linalg_pinv_out(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::linalg_pinv_out(self, atol, rtol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__test_optional_intlist_out(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_test_optional_intlist_out(values, addends, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__test_optional_filled_intlist_out(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_test_optional_filled_intlist_out(values, addends, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__test_optional_floatlist_out(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_test_optional_floatlist_out(values, addends, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___test_warn_in_autograd(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::_test_warn_in_autograd(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__test_warn_in_autograd_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_test_warn_in_autograd_out(self, out);
}
} // anonymous namespace
// NOTE(review): generated wrappers for the multiple-dispatch autograd test
// ops, segment_reduce out-variants, and the nested-tensor-from-list
// constructors. All forward verbatim to at::native implementations.
namespace {
at::Tensor wrapper_CompositeExplicitAutograd_fullcoverage__test_autograd_multiple_dispatch(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::_test_autograd_multiple_dispatch_fullcoverage(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_fullcoverage_out__test_autograd_multiple_dispatch_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_test_autograd_multiple_dispatch_fullcoverage_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___test_autograd_multiple_dispatch_view(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::_test_autograd_multiple_dispatch_view(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__test_autograd_multiple_dispatch_view_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_test_autograd_multiple_dispatch_view_copy_out(self, out);
}
} // anonymous namespace
namespace {
// reduce is the reduction name (e.g. a string like "sum"/"max" — semantics
// live in the native kernel); exactly one of lengths/indices/offsets
// describes the segment layout. TODO(review): confirm against native op docs.
at::Tensor & wrapper_CompositeExplicitAutograd_out_segment_reduce_out(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::segment_reduce_out(data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__segment_reduce_backward_out(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_segment_reduce_backward_out(grad, output, data, reduce, lengths, offsets, axis, initial, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutograd___nested_tensor_from_tensor_list(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::_nested_tensor_from_tensor_list(list, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__nested_tensor_from_tensor_list_out(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_nested_tensor_from_tensor_list_out(list, dtype, layout, device, pin_memory, out);
}
} // anonymous namespace
// NOTE(review): generated wrappers for the *_copy out-variants — the
// materializing (non-aliasing) counterparts of view ops. Each forwards to
// the at::native kernel; "_symint" variants take symbolic sizes/strides.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__fw_primal_copy_out(const at::Tensor & self, int64_t level, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_fw_primal_copy_out(self, level, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__make_dual_copy_out(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_make_dual_copy_out(primal, tangent, level, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_view_as_real_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::view_as_real_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_view_as_complex_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::view_as_complex_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__conj_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_conj_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__neg_view_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_neg_view_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_as_strided_copy_out(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::as_strided_copy_out_symint(self, size, stride, storage_offset, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__sparse_broadcast_to_copy_out(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_broadcast_to_copy_out(self, size, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_diagonal_copy_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::diagonal_copy_out(self, offset, dim1, dim2, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_expand_copy_out(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::expand_copy_out_symint(self, size, implicit, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_permute_copy_out(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::permute_copy_out(self, dims, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__reshape_alias_copy_out(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_reshape_alias_copy_out_symint(self, size, stride, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_int_out_select_copy_out(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::select_copy_int_out_symint(self, dim, index, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_detach_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::detach_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_Tensor_out_slice_copy_out(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::slice_copy_Tensor_out_symint(self, dim, start, end, step, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_Tensor_out_split_copy_out(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
// No device check
// DeviceGuard omitted
// The native out-variant has no SymInt overload here; expect_int()
// converts the SymInt to a concrete int64 (presumably erroring if the
// value is symbolic — TODO confirm against c10::SymInt docs).
return at::native::split_copy_Tensor_out(self, split_size.expect_int(), dim, out);
}
} // anonymous namespace
namespace {
void wrapper_CompositeExplicitAutograd_out_split_with_sizes_copy_out(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
// No device check
// DeviceGuard omitted
// C10_AS_INTARRAYREF_SLOW materializes the SymIntArrayRef as a concrete
// IntArrayRef for the non-symint native kernel.
return at::native::split_with_sizes_copy_out(self, C10_AS_INTARRAYREF_SLOW(split_sizes), dim, out);
}
} // anonymous namespace
// NOTE(review): generated *_copy out-wrappers continued — squeeze/transpose
// family plus the sparse-tensor component accessors (indices/values and the
// CSR/CSC crow/col/ccol/row variants). All forward verbatim to at::native.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_squeeze_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::squeeze_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_dim_out_squeeze_copy_out(const at::Tensor & self, int64_t dim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::squeeze_copy_dim_out(self, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_dims_out_squeeze_copy_out(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::squeeze_copy_dims_out(self, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_t_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::t_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_int_out_transpose_copy_out(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::transpose_copy_int_out(self, dim0, dim1, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_unsqueeze_copy_out(const at::Tensor & self, int64_t dim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::unsqueeze_copy_out(self, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__indices_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_indices_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__values_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_values_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::indices_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_values_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::values_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_crow_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::crow_indices_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_col_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::col_indices_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_ccol_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::ccol_indices_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_row_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::row_indices_copy_out(self, out);
}
} // anonymous namespace
namespace {
// Multi-output op: results are written into the preallocated TensorList.
void wrapper_CompositeExplicitAutograd_int_out_unbind_copy_out(const at::Tensor & self, int64_t dim, at::TensorList out) {
// No device check
// DeviceGuard omitted
return at::native::unbind_copy_int_out(self, dim, out);
}
} // anonymous namespace
// NOTE(review): generated wrappers for the remaining *_copy out-variants and
// the fused transformer/attention out-variants. The attention wrappers have
// very wide parameter lists; argument order mirrors the op schema exactly
// and must not be touched by hand — regenerate via torchgen instead.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_view_copy_out(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::view_copy_out_symint(self, size, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_dtype_out_view_copy_out(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::view_copy_dtype_out(self, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_unfold_copy_out(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::unfold_copy_out(self, dimension, size, step, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_alias_copy_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::alias_copy_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out_to_padded_tensor_out(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::to_padded_tensor_out_symint(self, padding, output_size, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__transformer_encoder_layer_fwd_out(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_transformer_encoder_layer_fwd_out(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__native_multi_head_attention_out(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
// No device check
// DeviceGuard omitted
return at::native::_native_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__triton_scaled_dot_attention_out(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_triton_scaled_dot_attention_out(q, k, v, dropout_p, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_out__triton_multi_head_attention_out(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::_triton_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__transformer_decoder_only_layer_fwd_out(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
// No device check
// DeviceGuard omitted
return at::native::_transformer_decoder_only_layer_fwd_out(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeExplicitAutograd_out__native_decoder_only_multi_head_attention_out(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
// No device check
// DeviceGuard omitted
return at::native::_native_decoder_only_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights, out0, out1, out2, out3);
}
} // anonymous namespace
// NOTE(review): generated wrappers for the orthogonal-polynomial special
// functions ("n_scalar" overloads: polynomial degree n is an at::Scalar,
// x is a Tensor). All forward verbatim to the at::native out-kernels.
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_t_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_chebyshev_polynomial_t_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_u_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_chebyshev_polynomial_u_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_v_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_chebyshev_polynomial_v_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_w_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_chebyshev_polynomial_w_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_hermite_polynomial_h_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_hermite_polynomial_h_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_hermite_polynomial_he_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_hermite_polynomial_he_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_laguerre_polynomial_l_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_laguerre_polynomial_l_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_legendre_polynomial_p_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_legendre_polynomial_p_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_t_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_shifted_chebyshev_polynomial_t_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_u_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::special_shifted_chebyshev_polynomial_u_out(x, n, out);
}
} // anonymous namespace
6549 | namespace { |
6550 | at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_v_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
6551 | // No device check |
6552 | // DeviceGuard omitted |
6553 | return at::native::special_shifted_chebyshev_polynomial_v_out(x, n, out); |
6554 | } |
6555 | } // anonymous namespace |
6556 | namespace { |
6557 | at::Tensor & wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_w_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
6558 | // No device check |
6559 | // DeviceGuard omitted |
6560 | return at::native::special_shifted_chebyshev_polynomial_w_out(x, n, out); |
6561 | } |
6562 | } // anonymous namespace |
6563 | namespace { |
6564 | at::Tensor & wrapper_CompositeExplicitAutograd_out__foobar_out(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) { |
6565 | // No device check |
6566 | // DeviceGuard omitted |
6567 | return at::native::_foobar_out(self, arg1, arg2, arg3, out); |
6568 | } |
6569 | } // anonymous namespace |
6570 | namespace { |
6571 | ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> wrapper_CompositeExplicitAutograd___fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) { |
6572 | // No device check |
6573 | // DeviceGuard omitted |
6574 | return at::native::_fused_adam(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); |
6575 | } |
6576 | } // anonymous namespace |
6577 | namespace { |
6578 | void wrapper_CompositeExplicitAutograd_out__fused_adam_out(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) { |
6579 | // No device check |
6580 | // DeviceGuard omitted |
6581 | return at::native::_fused_adam_out(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); |
6582 | } |
6583 | } // anonymous namespace |
6584 | namespace { |
6585 | ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> wrapper_CompositeExplicitAutograd___fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) { |
6586 | // No device check |
6587 | // DeviceGuard omitted |
6588 | return at::native::_fused_adamw(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); |
6589 | } |
6590 | } // anonymous namespace |
6591 | namespace { |
6592 | void wrapper_CompositeExplicitAutograd_out__fused_adamw_out(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) { |
6593 | // No device check |
6594 | // DeviceGuard omitted |
6595 | return at::native::_fused_adamw_out(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); |
6596 | } |
6597 | } // anonymous namespace |
6598 | TORCH_LIBRARY_IMPL(aten, CompositeExplicitAutograd, m) { |
6599 | m.impl("_fw_primal" , |
6600 | TORCH_FN(wrapper_CompositeExplicitAutograd___fw_primal)); |
6601 | m.impl("_make_dual" , |
6602 | TORCH_FN(wrapper_CompositeExplicitAutograd___make_dual)); |
6603 | m.impl("_new_zeros_with_same_feature_meta" , |
6604 | TORCH_FN(wrapper_CompositeExplicitAutograd___new_zeros_with_same_feature_meta)); |
6605 | m.impl("_new_zeros_with_same_feature_meta.out" , |
6606 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__new_zeros_with_same_feature_meta_out)); |
6607 | m.impl("_has_same_storage_numel" , |
6608 | TORCH_FN(wrapper_CompositeExplicitAutograd___has_same_storage_numel)); |
6609 | m.impl("_cudnn_ctc_loss.out" , |
6610 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__cudnn_ctc_loss_out)); |
6611 | m.impl("_cudnn_rnn_flatten_weight.out" , |
6612 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__cudnn_rnn_flatten_weight_out)); |
6613 | m.impl("_cudnn_rnn.out" , |
6614 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__cudnn_rnn_out)); |
6615 | m.impl("_cudnn_rnn_backward.out" , |
6616 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__cudnn_rnn_backward_out)); |
6617 | m.impl("_cudnn_init_dropout_state.out" , |
6618 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__cudnn_init_dropout_state_out)); |
6619 | m.impl("_fused_dropout.out" , |
6620 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__fused_dropout_out)); |
6621 | m.impl("_masked_scale.out" , |
6622 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__masked_scale_out)); |
6623 | m.impl("native_dropout.out" , |
6624 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_native_dropout_out)); |
6625 | m.impl("native_dropout_backward.out" , |
6626 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_native_dropout_backward_out)); |
6627 | m.impl("abs" , |
6628 | TORCH_FN(wrapper_CompositeExplicitAutograd__abs)); |
6629 | m.impl("abs_" , |
6630 | TORCH_FN(wrapper_CompositeExplicitAutograd__abs_)); |
6631 | m.impl("_conj" , |
6632 | TORCH_FN(wrapper_CompositeExplicitAutograd___conj)); |
6633 | m.impl("_conj_physical" , |
6634 | TORCH_FN(wrapper_CompositeExplicitAutograd___conj_physical)); |
6635 | m.impl("_conj_physical.out" , |
6636 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__conj_physical_out)); |
6637 | m.impl("conj_physical_" , |
6638 | TORCH_FN(wrapper_CompositeExplicitAutograd__conj_physical_)); |
6639 | m.impl("_neg_view" , |
6640 | TORCH_FN(wrapper_CompositeExplicitAutograd___neg_view)); |
6641 | m.impl("_add_relu.Scalar_out" , |
6642 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__add_relu_out)); |
6643 | m.impl("add.Scalar" , |
6644 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_add)); |
6645 | m.impl("add.Scalar_out" , |
6646 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_add_out)); |
6647 | m.impl("add_.Scalar" , |
6648 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_add_)); |
6649 | m.impl("addr" , |
6650 | TORCH_FN(wrapper_CompositeExplicitAutograd__addr)); |
6651 | m.impl("addr.out" , |
6652 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_addr_out)); |
6653 | m.impl("addr_" , |
6654 | TORCH_FN(wrapper_CompositeExplicitAutograd__addr_)); |
6655 | m.impl("affine_grid_generator" , |
6656 | TORCH_FN(wrapper_CompositeExplicitAutograd__affine_grid_generator)); |
6657 | m.impl("affine_grid_generator.out" , |
6658 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_affine_grid_generator_out)); |
6659 | m.impl("_is_all_true" , |
6660 | TORCH_FN(wrapper_CompositeExplicitAutograd___is_all_true)); |
6661 | m.impl("_is_any_true" , |
6662 | TORCH_FN(wrapper_CompositeExplicitAutograd___is_any_true)); |
6663 | m.impl("allclose" , |
6664 | TORCH_FN(wrapper_CompositeExplicitAutograd__allclose)); |
6665 | m.impl("arange" , |
6666 | TORCH_FN(wrapper_CompositeExplicitAutograd__arange)); |
6667 | m.impl("arange.out" , |
6668 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_arange_out)); |
6669 | m.impl("arange.start" , |
6670 | TORCH_FN(wrapper_CompositeExplicitAutograd_start_arange)); |
6671 | m.impl("arange.start_step" , |
6672 | TORCH_FN(wrapper_CompositeExplicitAutograd_start_step_arange)); |
6673 | m.impl("bartlett_window" , |
6674 | TORCH_FN(wrapper_CompositeExplicitAutograd__bartlett_window)); |
6675 | m.impl("bartlett_window.out" , |
6676 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_bartlett_window_out)); |
6677 | m.impl("bartlett_window.periodic" , |
6678 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_bartlett_window)); |
6679 | m.impl("bartlett_window.periodic_out" , |
6680 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_out_bartlett_window_out)); |
6681 | m.impl("quantized_batch_norm.out" , |
6682 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_quantized_batch_norm_out)); |
6683 | m.impl("bernoulli" , |
6684 | TORCH_FN(wrapper_CompositeExplicitAutograd__bernoulli)); |
6685 | m.impl("bernoulli.Tensor" , |
6686 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_bernoulli)); |
6687 | m.impl("bernoulli.Tensor_out" , |
6688 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out_bernoulli_out)); |
6689 | m.impl("bernoulli.float_out" , |
6690 | TORCH_FN(wrapper_CompositeExplicitAutograd_float_out_bernoulli_out)); |
6691 | m.impl("binary_cross_entropy_with_logits" , |
6692 | TORCH_FN(wrapper_CompositeExplicitAutograd__binary_cross_entropy_with_logits)); |
6693 | m.impl("binary_cross_entropy_with_logits.out" , |
6694 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_binary_cross_entropy_with_logits_out)); |
6695 | m.impl("bincount.out" , |
6696 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_bincount_out)); |
6697 | m.impl("copysign.Scalar" , |
6698 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_copysign)); |
6699 | m.impl("copysign.Scalar_out" , |
6700 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_copysign_out)); |
6701 | m.impl("copysign_.Scalar" , |
6702 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_copysign_)); |
6703 | m.impl("logical_not" , |
6704 | TORCH_FN(wrapper_CompositeExplicitAutograd__logical_not)); |
6705 | m.impl("logical_not_" , |
6706 | TORCH_FN(wrapper_CompositeExplicitAutograd__logical_not_)); |
6707 | m.impl("logical_xor" , |
6708 | TORCH_FN(wrapper_CompositeExplicitAutograd__logical_xor)); |
6709 | m.impl("logical_xor_" , |
6710 | TORCH_FN(wrapper_CompositeExplicitAutograd__logical_xor_)); |
6711 | m.impl("logical_and" , |
6712 | TORCH_FN(wrapper_CompositeExplicitAutograd__logical_and)); |
6713 | m.impl("logical_and_" , |
6714 | TORCH_FN(wrapper_CompositeExplicitAutograd__logical_and_)); |
6715 | m.impl("logical_or" , |
6716 | TORCH_FN(wrapper_CompositeExplicitAutograd__logical_or)); |
6717 | m.impl("logical_or_" , |
6718 | TORCH_FN(wrapper_CompositeExplicitAutograd__logical_or_)); |
6719 | m.impl("blackman_window" , |
6720 | TORCH_FN(wrapper_CompositeExplicitAutograd__blackman_window)); |
6721 | m.impl("blackman_window.out" , |
6722 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_blackman_window_out)); |
6723 | m.impl("blackman_window.periodic" , |
6724 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_blackman_window)); |
6725 | m.impl("blackman_window.periodic_out" , |
6726 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_out_blackman_window_out)); |
6727 | m.impl("block_diag" , |
6728 | TORCH_FN(wrapper_CompositeExplicitAutograd__block_diag)); |
6729 | m.impl("block_diag.out" , |
6730 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_block_diag_out)); |
6731 | m.impl("complex" , |
6732 | TORCH_FN(wrapper_CompositeExplicitAutograd__complex)); |
6733 | m.impl("polar" , |
6734 | TORCH_FN(wrapper_CompositeExplicitAutograd__polar)); |
6735 | m.impl("constant_pad_nd" , |
6736 | TORCH_FN(wrapper_CompositeExplicitAutograd__constant_pad_nd)); |
6737 | m.impl("constant_pad_nd.out" , |
6738 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_constant_pad_nd_out)); |
6739 | m.impl("convolution" , |
6740 | TORCH_FN(wrapper_CompositeExplicitAutograd__convolution)); |
6741 | m.impl("convolution.out" , |
6742 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_convolution_out)); |
6743 | m.impl("convolution_backward" , |
6744 | TORCH_FN(wrapper_CompositeExplicitAutograd__convolution_backward)); |
6745 | m.impl("convolution_backward.out" , |
6746 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_convolution_backward_out)); |
6747 | m.impl("convolution_overrideable" , |
6748 | TORCH_FN(wrapper_CompositeExplicitAutograd__convolution_overrideable)); |
6749 | m.impl("convolution_overrideable.out" , |
6750 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_convolution_overrideable_out)); |
6751 | m.impl("convolution_backward_overrideable" , |
6752 | TORCH_FN(wrapper_CompositeExplicitAutograd__convolution_backward_overrideable)); |
6753 | m.impl("convolution_backward_overrideable.out" , |
6754 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_convolution_backward_overrideable_out)); |
6755 | m.impl("_convolution" , |
6756 | TORCH_FN(wrapper_CompositeExplicitAutograd___convolution)); |
6757 | m.impl("_convolution.out" , |
6758 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__convolution_out)); |
6759 | m.impl("conv_tbc" , |
6760 | TORCH_FN(wrapper_CompositeExplicitAutograd__conv_tbc)); |
6761 | m.impl("conv_tbc.out" , |
6762 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_conv_tbc_out)); |
6763 | m.impl("copy.out" , |
6764 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_copy_out)); |
6765 | m.impl("copy_" , |
6766 | TORCH_FN(wrapper_CompositeExplicitAutograd__copy_)); |
6767 | m.impl("_copy_from.out" , |
6768 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__copy_from_out)); |
6769 | m.impl("_copy_from_and_resize.out" , |
6770 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__copy_from_and_resize_out)); |
6771 | m.impl("count_nonzero.dim_IntList_out" , |
6772 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_IntList_out_count_nonzero_out)); |
6773 | m.impl("count_nonzero" , |
6774 | TORCH_FN(wrapper_CompositeExplicitAutograd__count_nonzero)); |
6775 | m.impl("count_nonzero.out" , |
6776 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_count_nonzero_out)); |
6777 | m.impl("cudnn_affine_grid_generator.out" , |
6778 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_affine_grid_generator_out)); |
6779 | m.impl("cudnn_affine_grid_generator_backward.out" , |
6780 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_affine_grid_generator_backward_out)); |
6781 | m.impl("cudnn_batch_norm.out" , |
6782 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_batch_norm_out)); |
6783 | m.impl("cudnn_batch_norm_backward.out" , |
6784 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_batch_norm_backward_out)); |
6785 | m.impl("cudnn_convolution.out" , |
6786 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_convolution_out)); |
6787 | m.impl("cudnn_convolution_transpose.out" , |
6788 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_convolution_transpose_out)); |
6789 | m.impl("_mps_convolution_transpose.out" , |
6790 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__mps_convolution_transpose_out)); |
6791 | m.impl("mps_convolution_transpose_backward.out" , |
6792 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mps_convolution_transpose_backward_out)); |
6793 | m.impl("cudnn_convolution_relu.out" , |
6794 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_convolution_relu_out)); |
6795 | m.impl("cudnn_convolution_add_relu.out" , |
6796 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_convolution_add_relu_out)); |
6797 | m.impl("cudnn_grid_sampler.out" , |
6798 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_grid_sampler_out)); |
6799 | m.impl("cudnn_grid_sampler_backward.out" , |
6800 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cudnn_grid_sampler_backward_out)); |
6801 | m.impl("cummax" , |
6802 | TORCH_FN(wrapper_CompositeExplicitAutograd__cummax)); |
6803 | m.impl("cummax.out" , |
6804 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cummax_out)); |
6805 | m.impl("cummin" , |
6806 | TORCH_FN(wrapper_CompositeExplicitAutograd__cummin)); |
6807 | m.impl("cummin.out" , |
6808 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cummin_out)); |
6809 | m.impl("_ctc_loss.out" , |
6810 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__ctc_loss_out)); |
6811 | m.impl("_ctc_loss.Tensor_out" , |
6812 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out__ctc_loss_out)); |
6813 | m.impl("_ctc_loss_backward.out" , |
6814 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__ctc_loss_backward_out)); |
6815 | m.impl("diag_embed.out" , |
6816 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_diag_embed_out)); |
6817 | m.impl("diagonal" , |
6818 | TORCH_FN(wrapper_CompositeExplicitAutograd__diagonal)); |
6819 | m.impl("diagonal_backward" , |
6820 | TORCH_FN(wrapper_CompositeExplicitAutograd__diagonal_backward)); |
6821 | m.impl("diagonal_backward.out" , |
6822 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_diagonal_backward_out)); |
6823 | m.impl("div.Scalar" , |
6824 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_div)); |
6825 | m.impl("div.Scalar_out" , |
6826 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_div_out)); |
6827 | m.impl("div_.Scalar" , |
6828 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_div_)); |
6829 | m.impl("div.Scalar_mode" , |
6830 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_mode_div)); |
6831 | m.impl("div.Scalar_mode_out" , |
6832 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_mode_out_div_out)); |
6833 | m.impl("div_.Scalar_mode" , |
6834 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_mode_div_)); |
6835 | m.impl("dot.out" , |
6836 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_dot_out)); |
6837 | m.impl("vdot.out" , |
6838 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_vdot_out)); |
6839 | m.impl("embedding" , |
6840 | TORCH_FN(wrapper_CompositeExplicitAutograd__embedding)); |
6841 | m.impl("embedding.out" , |
6842 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_embedding_out)); |
6843 | m.impl("embedding_dense_backward.out" , |
6844 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_embedding_dense_backward_out)); |
6845 | m.impl("embedding_renorm" , |
6846 | TORCH_FN(wrapper_CompositeExplicitAutograd__embedding_renorm)); |
6847 | m.impl("embedding_renorm.out" , |
6848 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_embedding_renorm_out)); |
6849 | m.impl("_embedding_bag_forward_only.out" , |
6850 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__embedding_bag_forward_only_out)); |
6851 | m.impl("_embedding_bag.out" , |
6852 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__embedding_bag_out)); |
6853 | m.impl("_embedding_bag_dense_backward.out" , |
6854 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__embedding_bag_dense_backward_out)); |
6855 | m.impl("_embedding_bag_per_sample_weights_backward.out" , |
6856 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__embedding_bag_per_sample_weights_backward_out)); |
6857 | m.impl("empty.names" , |
6858 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_empty)); |
6859 | m.impl("empty.names_out" , |
6860 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_out_empty_out)); |
6861 | m.impl("new_empty" , |
6862 | TORCH_FN(wrapper_CompositeExplicitAutograd__new_empty)); |
6863 | m.impl("new_empty.out" , |
6864 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_new_empty_out)); |
6865 | m.impl("new_empty_strided.out" , |
6866 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_new_empty_strided_out)); |
6867 | m.impl("new_full" , |
6868 | TORCH_FN(wrapper_CompositeExplicitAutograd__new_full)); |
6869 | m.impl("new_full.out" , |
6870 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_new_full_out)); |
6871 | m.impl("new_zeros" , |
6872 | TORCH_FN(wrapper_CompositeExplicitAutograd__new_zeros)); |
6873 | m.impl("new_zeros.out" , |
6874 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_new_zeros_out)); |
6875 | m.impl("new_ones" , |
6876 | TORCH_FN(wrapper_CompositeExplicitAutograd__new_ones)); |
6877 | m.impl("new_ones.out" , |
6878 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_new_ones_out)); |
6879 | m.impl("_empty_affine_quantized.out" , |
6880 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__empty_affine_quantized_out)); |
6881 | m.impl("_empty_per_channel_affine_quantized.out" , |
6882 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__empty_per_channel_affine_quantized_out)); |
6883 | m.impl("resize" , |
6884 | TORCH_FN(wrapper_CompositeExplicitAutograd__resize)); |
6885 | m.impl("resize.out" , |
6886 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_resize_out)); |
6887 | m.impl("_resize_output" , |
6888 | TORCH_FN(wrapper_CompositeExplicitAutograd___resize_output)); |
6889 | m.impl("_resize_output.out" , |
6890 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__resize_output_out)); |
6891 | m.impl("empty_quantized.out" , |
6892 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_empty_quantized_out)); |
6893 | m.impl("empty_like" , |
6894 | TORCH_FN(wrapper_CompositeExplicitAutograd__empty_like)); |
6895 | m.impl("empty_like.out" , |
6896 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_empty_like_out)); |
6897 | m.impl("empty_strided.out" , |
6898 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_empty_strided_out)); |
6899 | m.impl("expand" , |
6900 | TORCH_FN(wrapper_CompositeExplicitAutograd__expand)); |
6901 | m.impl("eye" , |
6902 | TORCH_FN(wrapper_CompositeExplicitAutograd__eye)); |
6903 | m.impl("eye.m" , |
6904 | TORCH_FN(wrapper_CompositeExplicitAutograd_m_eye)); |
6905 | m.impl("fill.Scalar" , |
6906 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_fill)); |
6907 | m.impl("fill.Scalar_out" , |
6908 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_fill_out)); |
6909 | m.impl("fill.Tensor" , |
6910 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_fill)); |
6911 | m.impl("fill.Tensor_out" , |
6912 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out_fill_out)); |
6913 | m.impl("full.names" , |
6914 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_full)); |
6915 | m.impl("full.names_out" , |
6916 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_out_full_out)); |
6917 | m.impl("full" , |
6918 | TORCH_FN(wrapper_CompositeExplicitAutograd__full)); |
6919 | m.impl("full.out" , |
6920 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_full_out)); |
6921 | m.impl("full_like" , |
6922 | TORCH_FN(wrapper_CompositeExplicitAutograd__full_like)); |
6923 | m.impl("full_like.out" , |
6924 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_full_like_out)); |
6925 | m.impl("from_file.out" , |
6926 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_from_file_out)); |
6927 | m.impl("grid_sampler_2d.out" , |
6928 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_grid_sampler_2d_out)); |
6929 | m.impl("grid_sampler_2d_backward.out" , |
6930 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_grid_sampler_2d_backward_out)); |
6931 | m.impl("_grid_sampler_2d_cpu_fallback" , |
6932 | TORCH_FN(wrapper_CompositeExplicitAutograd___grid_sampler_2d_cpu_fallback)); |
6933 | m.impl("_grid_sampler_2d_cpu_fallback.out" , |
6934 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__grid_sampler_2d_cpu_fallback_out)); |
6935 | m.impl("grid_sampler_3d.out" , |
6936 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_grid_sampler_3d_out)); |
6937 | m.impl("grid_sampler_3d_backward.out" , |
6938 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_grid_sampler_3d_backward_out)); |
6939 | m.impl("hann_window" , |
6940 | TORCH_FN(wrapper_CompositeExplicitAutograd__hann_window)); |
6941 | m.impl("hann_window.out" , |
6942 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_hann_window_out)); |
6943 | m.impl("hann_window.periodic" , |
6944 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_hann_window)); |
6945 | m.impl("hann_window.periodic_out" , |
6946 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_out_hann_window_out)); |
6947 | m.impl("hamming_window" , |
6948 | TORCH_FN(wrapper_CompositeExplicitAutograd__hamming_window)); |
6949 | m.impl("hamming_window.out" , |
6950 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_hamming_window_out)); |
6951 | m.impl("hamming_window.periodic" , |
6952 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_hamming_window)); |
6953 | m.impl("hamming_window.periodic_out" , |
6954 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_out_hamming_window_out)); |
6955 | m.impl("hamming_window.periodic_alpha" , |
6956 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_alpha_hamming_window)); |
6957 | m.impl("hamming_window.periodic_alpha_out" , |
6958 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_alpha_out_hamming_window_out)); |
6959 | m.impl("hamming_window.periodic_alpha_beta" , |
6960 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_alpha_beta_hamming_window)); |
6961 | m.impl("hamming_window.periodic_alpha_beta_out" , |
6962 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_alpha_beta_out_hamming_window_out)); |
6963 | m.impl("kaiser_window" , |
6964 | TORCH_FN(wrapper_CompositeExplicitAutograd__kaiser_window)); |
6965 | m.impl("kaiser_window.out" , |
6966 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_kaiser_window_out)); |
6967 | m.impl("kaiser_window.periodic" , |
6968 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_kaiser_window)); |
6969 | m.impl("kaiser_window.periodic_out" , |
6970 | TORCH_FN(wrapper_CompositeExplicitAutograd_periodic_out_kaiser_window_out)); |
6971 | m.impl("kaiser_window.beta" , |
6972 | TORCH_FN(wrapper_CompositeExplicitAutograd_beta_kaiser_window)); |
6973 | m.impl("kaiser_window.beta_out" , |
6974 | TORCH_FN(wrapper_CompositeExplicitAutograd_beta_out_kaiser_window_out)); |
6975 | m.impl("native_group_norm" , |
6976 | TORCH_FN(wrapper_CompositeExplicitAutograd__native_group_norm)); |
6977 | m.impl("native_group_norm.out" , |
6978 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_native_group_norm_out)); |
6979 | m.impl("native_group_norm_backward.out" , |
6980 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_native_group_norm_backward_out)); |
6981 | m.impl("index_put" , |
6982 | TORCH_FN(wrapper_CompositeExplicitAutograd__index_put)); |
6983 | m.impl("index_put.out" , |
6984 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_index_put_out)); |
6985 | m.impl("index_put_" , |
6986 | TORCH_FN(wrapper_CompositeExplicitAutograd__index_put_)); |
6987 | m.impl("_index_put_impl" , |
6988 | TORCH_FN(wrapper_CompositeExplicitAutograd___index_put_impl)); |
6989 | m.impl("_index_put_impl.out" , |
6990 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__index_put_impl_out)); |
6991 | m.impl("isnan.out" , |
6992 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_isnan_out)); |
6993 | m.impl("is_same_size" , |
6994 | TORCH_FN(wrapper_CompositeExplicitAutograd__is_same_size)); |
6995 | m.impl("kthvalue" , |
6996 | TORCH_FN(wrapper_CompositeExplicitAutograd__kthvalue)); |
6997 | m.impl("native_layer_norm" , |
6998 | TORCH_FN(wrapper_CompositeExplicitAutograd__native_layer_norm)); |
6999 | m.impl("native_layer_norm.out" , |
7000 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_native_layer_norm_out)); |
7001 | m.impl("native_layer_norm_backward.out" , |
7002 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_native_layer_norm_backward_out)); |
7003 | m.impl("nan_to_num" , |
7004 | TORCH_FN(wrapper_CompositeExplicitAutograd__nan_to_num)); |
7005 | m.impl("nan_to_num_" , |
7006 | TORCH_FN(wrapper_CompositeExplicitAutograd__nan_to_num_)); |
7007 | m.impl("linear.out" , |
7008 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_linear_out)); |
7009 | m.impl("linear_backward.out" , |
7010 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_linear_backward_out)); |
7011 | m.impl("mkldnn_linear.out" , |
7012 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_linear_out)); |
7013 | m.impl("mkldnn_linear_backward_input.out" , |
7014 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_input_out)); |
7015 | m.impl("mkldnn_linear_backward_weights.out" , |
7016 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_weights_out)); |
7017 | m.impl("mkldnn_linear_backward.out" , |
7018 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_out)); |
7019 | m.impl("linspace" , |
7020 | TORCH_FN(wrapper_CompositeExplicitAutograd__linspace)); |
7021 | m.impl("xlogy.Scalar_Self" , |
7022 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Self_xlogy)); |
7023 | m.impl("xlogy.OutScalar_Self" , |
7024 | TORCH_FN(wrapper_CompositeExplicitAutograd_OutScalar_Self_xlogy_out)); |
7025 | m.impl("xlogy.Scalar_Other" , |
7026 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Other_xlogy)); |
7027 | m.impl("xlogy.OutScalar_Other" , |
7028 | TORCH_FN(wrapper_CompositeExplicitAutograd_OutScalar_Other_xlogy_out)); |
7029 | m.impl("xlogy_.Scalar_Other" , |
7030 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Other_xlogy_)); |
7031 | m.impl("logspace" , |
7032 | TORCH_FN(wrapper_CompositeExplicitAutograd__logspace)); |
7033 | m.impl("log_softmax.int_out" , |
7034 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_out_log_softmax_out)); |
7035 | m.impl("logcumsumexp" , |
7036 | TORCH_FN(wrapper_CompositeExplicitAutograd__logcumsumexp)); |
7037 | m.impl("logcumsumexp.out" , |
7038 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_logcumsumexp_out)); |
7039 | m.impl("logsumexp" , |
7040 | TORCH_FN(wrapper_CompositeExplicitAutograd__logsumexp)); |
7041 | m.impl("matmul_backward.out" , |
7042 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_matmul_backward_out)); |
7043 | m.impl("_aminmax.out" , |
7044 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__aminmax_out)); |
7045 | m.impl("_aminmax.dim_out" , |
7046 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_out__aminmax_out)); |
7047 | m.impl("_mps_max_pool2d.out" , |
7048 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__mps_max_pool2d_out)); |
7049 | m.impl("mps_max_pool2d_backward.out" , |
7050 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mps_max_pool2d_backward_out)); |
7051 | m.impl("mkldnn_max_pool2d.out" , |
7052 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool2d_out)); |
7053 | m.impl("mkldnn_max_pool2d_backward.out" , |
7054 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool2d_backward_out)); |
7055 | m.impl("mkldnn_max_pool3d.out" , |
7056 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool3d_out)); |
7057 | m.impl("mkldnn_max_pool3d_backward.out" , |
7058 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool3d_backward_out)); |
7059 | m.impl("quantized_max_pool1d.out" , |
7060 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_quantized_max_pool1d_out)); |
7061 | m.impl("quantized_max_pool2d.out" , |
7062 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_quantized_max_pool2d_out)); |
7063 | m.impl("mean" , |
7064 | TORCH_FN(wrapper_CompositeExplicitAutograd__mean)); |
7065 | m.impl("median.out" , |
7066 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_median_out)); |
7067 | m.impl("median.dim" , |
7068 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_median)); |
7069 | m.impl("nanmedian.out" , |
7070 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_nanmedian_out)); |
7071 | m.impl("nanmedian.dim" , |
7072 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_nanmedian)); |
7073 | m.impl("_mps_convolution.out" , |
7074 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__mps_convolution_out)); |
7075 | m.impl("mps_convolution_backward.out" , |
7076 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mps_convolution_backward_out)); |
7077 | m.impl("mkldnn_convolution" , |
7078 | TORCH_FN(wrapper_CompositeExplicitAutograd__mkldnn_convolution)); |
7079 | m.impl("mkldnn_convolution.out" , |
7080 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_convolution_out)); |
7081 | m.impl("mkldnn_rnn_layer.out" , |
7082 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_rnn_layer_out)); |
7083 | m.impl("mkldnn_rnn_layer_backward.out" , |
7084 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_rnn_layer_backward_out)); |
7085 | m.impl("miopen_batch_norm.out" , |
7086 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_miopen_batch_norm_out)); |
7087 | m.impl("miopen_batch_norm_backward.out" , |
7088 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_miopen_batch_norm_backward_out)); |
7089 | m.impl("miopen_convolution.out" , |
7090 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_miopen_convolution_out)); |
7091 | m.impl("miopen_convolution_transpose.out" , |
7092 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_miopen_convolution_transpose_out)); |
7093 | m.impl("miopen_depthwise_convolution.out" , |
7094 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_miopen_depthwise_convolution_out)); |
7095 | m.impl("miopen_rnn.out" , |
7096 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_miopen_rnn_out)); |
7097 | m.impl("miopen_rnn_backward.out" , |
7098 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_miopen_rnn_backward_out)); |
7099 | m.impl("_sparse_sparse_matmul.out" , |
7100 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_sparse_matmul_out)); |
7101 | m.impl("mode.values" , |
7102 | TORCH_FN(wrapper_CompositeExplicitAutograd_values_mode_out)); |
7103 | m.impl("mul.Scalar" , |
7104 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_mul)); |
7105 | m.impl("mul.Scalar_out" , |
7106 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_mul_out)); |
7107 | m.impl("mul_.Scalar" , |
7108 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_mul_)); |
7109 | m.impl("mv" , |
7110 | TORCH_FN(wrapper_CompositeExplicitAutograd__mv)); |
7111 | m.impl("mv.out" , |
7112 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mv_out)); |
7113 | m.impl("mvlgamma" , |
7114 | TORCH_FN(wrapper_CompositeExplicitAutograd__mvlgamma)); |
7115 | m.impl("mvlgamma_" , |
7116 | TORCH_FN(wrapper_CompositeExplicitAutograd__mvlgamma_)); |
7117 | m.impl("_native_batch_norm_legit_functional" , |
7118 | TORCH_FN(wrapper_CompositeExplicitAutograd___native_batch_norm_legit_functional)); |
7119 | m.impl("batch_norm_stats.out" , |
7120 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_batch_norm_stats_out)); |
7121 | m.impl("batch_norm_gather_stats.out" , |
7122 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_batch_norm_gather_stats_out)); |
7123 | m.impl("batch_norm_gather_stats_with_counts.out" , |
7124 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_batch_norm_gather_stats_with_counts_out)); |
7125 | m.impl("native_batch_norm_backward.out" , |
7126 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_native_batch_norm_backward_out)); |
7127 | m.impl("batch_norm_backward_reduce.out" , |
7128 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_batch_norm_backward_reduce_out)); |
7129 | m.impl("batch_norm_backward_elemt.out" , |
7130 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_batch_norm_backward_elemt_out)); |
7131 | m.impl("batch_norm_update_stats.out" , |
7132 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_batch_norm_update_stats_out)); |
7133 | m.impl("_nnpack_spatial_convolution" , |
7134 | TORCH_FN(wrapper_CompositeExplicitAutograd___nnpack_spatial_convolution)); |
7135 | m.impl("_nnpack_spatial_convolution.out" , |
7136 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__nnpack_spatial_convolution_out)); |
7137 | m.impl("ones.names" , |
7138 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_ones)); |
7139 | m.impl("ones.names_out" , |
7140 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_out_ones_out)); |
7141 | m.impl("ones" , |
7142 | TORCH_FN(wrapper_CompositeExplicitAutograd__ones)); |
7143 | m.impl("ones.out" , |
7144 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_ones_out)); |
7145 | m.impl("ones_like" , |
7146 | TORCH_FN(wrapper_CompositeExplicitAutograd__ones_like)); |
7147 | m.impl("ones_like.out" , |
7148 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_ones_like_out)); |
7149 | m.impl("_euclidean_dist" , |
7150 | TORCH_FN(wrapper_CompositeExplicitAutograd___euclidean_dist)); |
7151 | m.impl("_euclidean_dist.out" , |
7152 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__euclidean_dist_out)); |
7153 | m.impl("_cdist_forward.out" , |
7154 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__cdist_forward_out)); |
7155 | m.impl("_cdist_backward.out" , |
7156 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__cdist_backward_out)); |
7157 | m.impl("_pdist_forward.out" , |
7158 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__pdist_forward_out)); |
7159 | m.impl("_pdist_backward.out" , |
7160 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__pdist_backward_out)); |
7161 | m.impl("permute" , |
7162 | TORCH_FN(wrapper_CompositeExplicitAutograd__permute)); |
7163 | m.impl("pixel_shuffle.out" , |
7164 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_pixel_shuffle_out)); |
7165 | m.impl("pixel_unshuffle.out" , |
7166 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_pixel_unshuffle_out)); |
7167 | m.impl("channel_shuffle.out" , |
7168 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_channel_shuffle_out)); |
7169 | m.impl("is_pinned" , |
7170 | TORCH_FN(wrapper_CompositeExplicitAutograd__is_pinned)); |
7171 | m.impl("_pin_memory.out" , |
7172 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__pin_memory_out)); |
7173 | m.impl("rad2deg" , |
7174 | TORCH_FN(wrapper_CompositeExplicitAutograd__rad2deg)); |
7175 | m.impl("rad2deg.out" , |
7176 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_rad2deg_out)); |
7177 | m.impl("rad2deg_" , |
7178 | TORCH_FN(wrapper_CompositeExplicitAutograd__rad2deg_)); |
7179 | m.impl("deg2rad" , |
7180 | TORCH_FN(wrapper_CompositeExplicitAutograd__deg2rad)); |
7181 | m.impl("deg2rad.out" , |
7182 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_deg2rad_out)); |
7183 | m.impl("deg2rad_" , |
7184 | TORCH_FN(wrapper_CompositeExplicitAutograd__deg2rad_)); |
7185 | m.impl("scalar_tensor" , |
7186 | TORCH_FN(wrapper_CompositeExplicitAutograd__scalar_tensor)); |
7187 | m.impl("scalar_tensor.out" , |
7188 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_scalar_tensor_out)); |
7189 | m.impl("rand.names" , |
7190 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_rand)); |
7191 | m.impl("rand.names_out" , |
7192 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_out_rand_out)); |
7193 | m.impl("rand.generator_with_names" , |
7194 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_with_names_rand)); |
7195 | m.impl("rand.generator_with_names_out" , |
7196 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_with_names_out_rand_out)); |
7197 | m.impl("rand" , |
7198 | TORCH_FN(wrapper_CompositeExplicitAutograd__rand)); |
7199 | m.impl("rand.out" , |
7200 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_rand_out)); |
7201 | m.impl("rand.generator" , |
7202 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_rand)); |
7203 | m.impl("rand_like" , |
7204 | TORCH_FN(wrapper_CompositeExplicitAutograd__rand_like)); |
7205 | m.impl("rand_like.out" , |
7206 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_rand_like_out)); |
7207 | m.impl("randint" , |
7208 | TORCH_FN(wrapper_CompositeExplicitAutograd__randint)); |
7209 | m.impl("randint.out" , |
7210 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_randint_out)); |
7211 | m.impl("randint.generator" , |
7212 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_randint)); |
7213 | m.impl("randint.generator_out" , |
7214 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_out_randint_out)); |
7215 | m.impl("randint.low" , |
7216 | TORCH_FN(wrapper_CompositeExplicitAutograd_low_randint)); |
7217 | m.impl("randint.low_out" , |
7218 | TORCH_FN(wrapper_CompositeExplicitAutograd_low_out_randint_out)); |
7219 | m.impl("randint.low_generator" , |
7220 | TORCH_FN(wrapper_CompositeExplicitAutograd_low_generator_randint)); |
7221 | m.impl("randint.low_generator_out" , |
7222 | TORCH_FN(wrapper_CompositeExplicitAutograd_low_generator_out_randint_out)); |
7223 | m.impl("randint_like" , |
7224 | TORCH_FN(wrapper_CompositeExplicitAutograd__randint_like)); |
7225 | m.impl("randint_like.out" , |
7226 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_randint_like_out)); |
7227 | m.impl("randint_like.low_dtype" , |
7228 | TORCH_FN(wrapper_CompositeExplicitAutograd_low_dtype_randint_like)); |
7229 | m.impl("randint_like.low_dtype_out" , |
7230 | TORCH_FN(wrapper_CompositeExplicitAutograd_low_dtype_out_randint_like_out)); |
7231 | m.impl("randn" , |
7232 | TORCH_FN(wrapper_CompositeExplicitAutograd__randn)); |
7233 | m.impl("randn.generator" , |
7234 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_randn)); |
7235 | m.impl("randn.names" , |
7236 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_randn)); |
7237 | m.impl("randn.names_out" , |
7238 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_out_randn_out)); |
7239 | m.impl("randn.generator_with_names" , |
7240 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_with_names_randn)); |
7241 | m.impl("randn.generator_with_names_out" , |
7242 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_with_names_out_randn_out)); |
7243 | m.impl("randn_like" , |
7244 | TORCH_FN(wrapper_CompositeExplicitAutograd__randn_like)); |
7245 | m.impl("randn_like.out" , |
7246 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_randn_like_out)); |
7247 | m.impl("randperm" , |
7248 | TORCH_FN(wrapper_CompositeExplicitAutograd__randperm)); |
7249 | m.impl("randperm.out" , |
7250 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_randperm_out)); |
7251 | m.impl("randperm.generator" , |
7252 | TORCH_FN(wrapper_CompositeExplicitAutograd_generator_randperm)); |
7253 | m.impl("range.step" , |
7254 | TORCH_FN(wrapper_CompositeExplicitAutograd_step_range)); |
7255 | m.impl("range" , |
7256 | TORCH_FN(wrapper_CompositeExplicitAutograd__range)); |
7257 | m.impl("range.out_" , |
7258 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__range_out)); |
7259 | m.impl("repeat" , |
7260 | TORCH_FN(wrapper_CompositeExplicitAutograd__repeat)); |
7261 | m.impl("repeat.out" , |
7262 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_repeat_out)); |
7263 | m.impl("repeat_interleave.Tensor_out" , |
7264 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out_repeat_interleave_out)); |
7265 | m.impl("_reshape_copy" , |
7266 | TORCH_FN(wrapper_CompositeExplicitAutograd___reshape_copy)); |
7267 | m.impl("_mkldnn_reshape.out" , |
7268 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__mkldnn_reshape_out)); |
7269 | m.impl("relu.out" , |
7270 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_relu_out)); |
7271 | m.impl("select.int" , |
7272 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_select)); |
7273 | m.impl("select_backward.out" , |
7274 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_select_backward_out)); |
7275 | m.impl("celu" , |
7276 | TORCH_FN(wrapper_CompositeExplicitAutograd__celu)); |
7277 | m.impl("celu.out" , |
7278 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_celu_out)); |
7279 | m.impl("celu_" , |
7280 | TORCH_FN(wrapper_CompositeExplicitAutograd__celu_)); |
7281 | m.impl("detach" , |
7282 | TORCH_FN(wrapper_CompositeExplicitAutograd__detach)); |
7283 | m.impl("detach_" , |
7284 | TORCH_FN(wrapper_CompositeExplicitAutograd__detach_)); |
7285 | m.impl("slice.Tensor" , |
7286 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_slice)); |
7287 | m.impl("slice_backward" , |
7288 | TORCH_FN(wrapper_CompositeExplicitAutograd__slice_backward)); |
7289 | m.impl("slice_backward.out" , |
7290 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_slice_backward_out)); |
7291 | m.impl("slice_scatter" , |
7292 | TORCH_FN(wrapper_CompositeExplicitAutograd__slice_scatter)); |
7293 | m.impl("slice_scatter.out" , |
7294 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_slice_scatter_out)); |
7295 | m.impl("select_scatter" , |
7296 | TORCH_FN(wrapper_CompositeExplicitAutograd__select_scatter)); |
7297 | m.impl("select_scatter.out" , |
7298 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_select_scatter_out)); |
7299 | m.impl("diagonal_scatter" , |
7300 | TORCH_FN(wrapper_CompositeExplicitAutograd__diagonal_scatter)); |
7301 | m.impl("diagonal_scatter.out" , |
7302 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_diagonal_scatter_out)); |
7303 | m.impl("as_strided_scatter" , |
7304 | TORCH_FN(wrapper_CompositeExplicitAutograd__as_strided_scatter)); |
7305 | m.impl("as_strided_scatter.out" , |
7306 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_as_strided_scatter_out)); |
7307 | m.impl("softmax.int_out" , |
7308 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_out_softmax_out)); |
7309 | m.impl("unsafe_split.Tensor" , |
7310 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_unsafe_split)); |
7311 | m.impl("unsafe_split.Tensor_out" , |
7312 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out_unsafe_split_out)); |
7313 | m.impl("split.Tensor" , |
7314 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_split)); |
7315 | m.impl("unsafe_split_with_sizes" , |
7316 | TORCH_FN(wrapper_CompositeExplicitAutograd__unsafe_split_with_sizes)); |
7317 | m.impl("unsafe_split_with_sizes.out" , |
7318 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_unsafe_split_with_sizes_out)); |
7319 | m.impl("split_with_sizes" , |
7320 | TORCH_FN(wrapper_CompositeExplicitAutograd__split_with_sizes)); |
7321 | m.impl("squeeze" , |
7322 | TORCH_FN(wrapper_CompositeExplicitAutograd__squeeze)); |
7323 | m.impl("squeeze_" , |
7324 | TORCH_FN(wrapper_CompositeExplicitAutograd__squeeze_)); |
7325 | m.impl("squeeze.dim" , |
7326 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_squeeze)); |
7327 | m.impl("squeeze_.dim" , |
7328 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_squeeze_)); |
7329 | m.impl("squeeze.dims" , |
7330 | TORCH_FN(wrapper_CompositeExplicitAutograd_dims_squeeze)); |
7331 | m.impl("squeeze_.dims" , |
7332 | TORCH_FN(wrapper_CompositeExplicitAutograd_dims_squeeze_)); |
7333 | m.impl("stack" , |
7334 | TORCH_FN(wrapper_CompositeExplicitAutograd__stack)); |
7335 | m.impl("stack.out" , |
7336 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_stack_out)); |
7337 | m.impl("_stack" , |
7338 | TORCH_FN(wrapper_CompositeExplicitAutograd___stack)); |
7339 | m.impl("_stack.out" , |
7340 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__stack_out)); |
7341 | m.impl("sum" , |
7342 | TORCH_FN(wrapper_CompositeExplicitAutograd__sum)); |
7343 | m.impl("sum.out" , |
7344 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_sum_out)); |
7345 | m.impl("std_mean.correction_out" , |
7346 | TORCH_FN(wrapper_CompositeExplicitAutograd_correction_out_std_mean_out)); |
7347 | m.impl("prod.out" , |
7348 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_prod_out)); |
7349 | m.impl("t" , |
7350 | TORCH_FN(wrapper_CompositeExplicitAutograd__t)); |
7351 | m.impl("t_" , |
7352 | TORCH_FN(wrapper_CompositeExplicitAutograd__t_)); |
7353 | m.impl("transpose.int" , |
7354 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_transpose)); |
7355 | m.impl("transpose_" , |
7356 | TORCH_FN(wrapper_CompositeExplicitAutograd__transpose_)); |
7357 | m.impl("_mkldnn_transpose.out" , |
7358 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__mkldnn_transpose_out)); |
7359 | m.impl("flip.out" , |
7360 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_flip_out)); |
7361 | m.impl("roll.out" , |
7362 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_roll_out)); |
7363 | m.impl("rot90" , |
7364 | TORCH_FN(wrapper_CompositeExplicitAutograd__rot90)); |
7365 | m.impl("rot90.out" , |
7366 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_rot90_out)); |
7367 | m.impl("_transform_bias_rescale_qkv.out" , |
7368 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__transform_bias_rescale_qkv_out)); |
7369 | m.impl("_nested_tensor_from_mask.out" , |
7370 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__nested_tensor_from_mask_out)); |
7371 | m.impl("_nested_from_padded.out" , |
7372 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__nested_from_padded_out)); |
7373 | m.impl("_nested_tensor_size.out" , |
7374 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__nested_tensor_size_out)); |
7375 | m.impl("_nested_tensor_strides.out" , |
7376 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__nested_tensor_strides_out)); |
7377 | m.impl("_nested_from_padded_and_nested_example.out" , |
7378 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__nested_from_padded_and_nested_example_out)); |
7379 | m.impl("_nested_view_from_buffer_copy.out" , |
7380 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__nested_view_from_buffer_copy_out)); |
7381 | m.impl("_trilinear.out" , |
7382 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__trilinear_out)); |
7383 | m.impl("_unique.out" , |
7384 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__unique_out)); |
7385 | m.impl("unique_dim.out" , |
7386 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_unique_dim_out)); |
7387 | m.impl("unique_consecutive.out" , |
7388 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_unique_consecutive_out)); |
7389 | m.impl("unique_dim_consecutive.out" , |
7390 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_unique_dim_consecutive_out)); |
7391 | m.impl("_unique2.out" , |
7392 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__unique2_out)); |
7393 | m.impl("_unsafe_view" , |
7394 | TORCH_FN(wrapper_CompositeExplicitAutograd___unsafe_view)); |
7395 | m.impl("_unsafe_view.out" , |
7396 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__unsafe_view_out)); |
7397 | m.impl("unsqueeze" , |
7398 | TORCH_FN(wrapper_CompositeExplicitAutograd__unsqueeze)); |
7399 | m.impl("unsqueeze_" , |
7400 | TORCH_FN(wrapper_CompositeExplicitAutograd__unsqueeze_)); |
7401 | m.impl("var_mean.correction_out" , |
7402 | TORCH_FN(wrapper_CompositeExplicitAutograd_correction_out_var_mean_out)); |
7403 | m.impl("_weight_norm_interface.out" , |
7404 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__weight_norm_interface_out)); |
7405 | m.impl("_weight_norm_interface_backward.out" , |
7406 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__weight_norm_interface_backward_out)); |
7407 | m.impl("zeros.names" , |
7408 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_zeros)); |
7409 | m.impl("zeros.names_out" , |
7410 | TORCH_FN(wrapper_CompositeExplicitAutograd_names_out_zeros_out)); |
7411 | m.impl("_efficientzerotensor.out" , |
7412 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__efficientzerotensor_out)); |
7413 | m.impl("zeros" , |
7414 | TORCH_FN(wrapper_CompositeExplicitAutograd__zeros)); |
7415 | m.impl("zeros.out" , |
7416 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_zeros_out)); |
7417 | m.impl("zeros_like" , |
7418 | TORCH_FN(wrapper_CompositeExplicitAutograd__zeros_like)); |
7419 | m.impl("zeros_like.out" , |
7420 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_zeros_like_out)); |
7421 | m.impl("_standard_gamma_grad.out" , |
7422 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__standard_gamma_grad_out)); |
7423 | m.impl("_standard_gamma.out" , |
7424 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__standard_gamma_out)); |
7425 | m.impl("_dirichlet_grad.out" , |
7426 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__dirichlet_grad_out)); |
7427 | m.impl("_sample_dirichlet.out" , |
7428 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sample_dirichlet_out)); |
7429 | m.impl("poisson.out" , |
7430 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_poisson_out)); |
7431 | m.impl("binomial.out" , |
7432 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_binomial_out)); |
7433 | m.impl("native_norm.out" , |
7434 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_native_norm_out)); |
7435 | m.impl("native_norm.ScalarOpt_dim_dtype_out" , |
7436 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarOpt_dim_dtype_out_native_norm_out)); |
7437 | m.impl("_sparse_sum.dim" , |
7438 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim__sparse_sum)); |
7439 | m.impl("_sparse_sum.dim_out" , |
7440 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_out__sparse_sum_out)); |
7441 | m.impl("_sparse_sum_backward.out" , |
7442 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_sum_backward_out)); |
7443 | m.impl("_sparse_csr_sum.dim_dtype_out" , |
7444 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_dtype_out__sparse_csr_sum_out)); |
7445 | m.impl("_sparse_csr_prod.dim_dtype_out" , |
7446 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_dtype_out__sparse_csr_prod_out)); |
7447 | m.impl("_sparse_softmax.out" , |
7448 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_softmax_out)); |
7449 | m.impl("_sparse_softmax_backward_data.out" , |
7450 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_softmax_backward_data_out)); |
7451 | m.impl("_sparse_log_softmax.out" , |
7452 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_log_softmax_out)); |
7453 | m.impl("_sparse_log_softmax_backward_data.out" , |
7454 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_log_softmax_backward_data_out)); |
7455 | m.impl("_spdiags.out" , |
7456 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__spdiags_out)); |
7457 | m.impl("norm.ScalarOpt_dtype" , |
7458 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarOpt_dtype_norm)); |
7459 | m.impl("norm.ScalarOpt_dtype_out" , |
7460 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarOpt_dtype_out_norm_out)); |
7461 | m.impl("norm.Scalar" , |
7462 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_norm)); |
7463 | m.impl("norm.Scalar_out" , |
7464 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_norm_out)); |
7465 | m.impl("frexp.Tensor" , |
7466 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_frexp)); |
7467 | m.impl("clone" , |
7468 | TORCH_FN(wrapper_CompositeExplicitAutograd__clone)); |
7469 | m.impl("clone.out" , |
7470 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_clone_out)); |
7471 | m.impl("resize_as" , |
7472 | TORCH_FN(wrapper_CompositeExplicitAutograd__resize_as)); |
7473 | m.impl("resize_as.out" , |
7474 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_resize_as_out)); |
7475 | m.impl("resize_as_" , |
7476 | TORCH_FN(wrapper_CompositeExplicitAutograd__resize_as_)); |
7477 | m.impl("resize_as_sparse" , |
7478 | TORCH_FN(wrapper_CompositeExplicitAutograd__resize_as_sparse)); |
7479 | m.impl("resize_as_sparse.out" , |
7480 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_resize_as_sparse_out)); |
7481 | m.impl("zero" , |
7482 | TORCH_FN(wrapper_CompositeExplicitAutograd__zero)); |
7483 | m.impl("zero.out" , |
7484 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_zero_out)); |
7485 | m.impl("sub.Scalar" , |
7486 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_sub)); |
7487 | m.impl("sub.Scalar_out" , |
7488 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_sub_out)); |
7489 | m.impl("sub_.Scalar" , |
7490 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_sub_)); |
7491 | m.impl("rsub.Tensor_out" , |
7492 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out_rsub_out)); |
7493 | m.impl("rsub.Scalar" , |
7494 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_rsub)); |
7495 | m.impl("rsub.Scalar_out" , |
7496 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_rsub_out)); |
7497 | m.impl("_sparse_addmm" , |
7498 | TORCH_FN(wrapper_CompositeExplicitAutograd___sparse_addmm)); |
7499 | m.impl("_sparse_addmm.out" , |
7500 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_addmm_out)); |
7501 | m.impl("sparse_coo_tensor.size" , |
7502 | TORCH_FN(wrapper_CompositeExplicitAutograd_size_sparse_coo_tensor)); |
7503 | m.impl("sparse_coo_tensor.size_out" , |
7504 | TORCH_FN(wrapper_CompositeExplicitAutograd_size_out_sparse_coo_tensor_out)); |
7505 | m.impl("_sparse_coo_tensor_with_dims.out" , |
7506 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_out)); |
7507 | m.impl("_sparse_coo_tensor_with_dims_and_tensors.out" , |
7508 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_and_tensors_out)); |
7509 | m.impl("sparse_resize" , |
7510 | TORCH_FN(wrapper_CompositeExplicitAutograd__sparse_resize)); |
7511 | m.impl("sparse_resize.out" , |
7512 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_sparse_resize_out)); |
7513 | m.impl("sparse_resize_and_clear" , |
7514 | TORCH_FN(wrapper_CompositeExplicitAutograd__sparse_resize_and_clear)); |
7515 | m.impl("sparse_resize_and_clear.out" , |
7516 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_sparse_resize_and_clear_out)); |
7517 | m.impl("sparse_mask.out" , |
7518 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_sparse_mask_out)); |
7519 | m.impl("_to_dense.out" , |
7520 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__to_dense_out)); |
7521 | m.impl("_coalesce.out" , |
7522 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__coalesce_out)); |
7523 | m.impl("is_coalesced" , |
7524 | TORCH_FN(wrapper_CompositeExplicitAutograd__is_coalesced)); |
7525 | m.impl("_coalesced" , |
7526 | TORCH_FN(wrapper_CompositeExplicitAutograd___coalesced)); |
7527 | m.impl("_coalesced.out" , |
7528 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__coalesced_out)); |
7529 | m.impl("indices" , |
7530 | TORCH_FN(wrapper_CompositeExplicitAutograd__indices)); |
7531 | m.impl("values" , |
7532 | TORCH_FN(wrapper_CompositeExplicitAutograd__values)); |
7533 | m.impl("crow_indices" , |
7534 | TORCH_FN(wrapper_CompositeExplicitAutograd__crow_indices)); |
7535 | m.impl("col_indices" , |
7536 | TORCH_FN(wrapper_CompositeExplicitAutograd__col_indices)); |
7537 | m.impl("ccol_indices" , |
7538 | TORCH_FN(wrapper_CompositeExplicitAutograd__ccol_indices)); |
7539 | m.impl("row_indices" , |
7540 | TORCH_FN(wrapper_CompositeExplicitAutograd__row_indices)); |
7541 | m.impl("copy_sparse_to_sparse" , |
7542 | TORCH_FN(wrapper_CompositeExplicitAutograd__copy_sparse_to_sparse)); |
7543 | m.impl("copy_sparse_to_sparse.out" , |
7544 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_copy_sparse_to_sparse_out)); |
7545 | m.impl("unbind.int" , |
7546 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_unbind)); |
7547 | m.impl("to_sparse.sparse_dim_out" , |
7548 | TORCH_FN(wrapper_CompositeExplicitAutograd_sparse_dim_out_to_sparse_out)); |
7549 | m.impl("to_sparse.out" , |
7550 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_to_sparse_out)); |
7551 | m.impl("to_sparse_csr.out" , |
7552 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_to_sparse_csr_out)); |
7553 | m.impl("to_sparse_csc.out" , |
7554 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_to_sparse_csc_out)); |
7555 | m.impl("to_sparse_bsr.out" , |
7556 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_to_sparse_bsr_out)); |
7557 | m.impl("to_sparse_bsc.out" , |
7558 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_to_sparse_bsc_out)); |
7559 | m.impl("to_mkldnn.out" , |
7560 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_to_mkldnn_out)); |
7561 | m.impl("mkldnn_reorder_conv2d_weight.out" , |
7562 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_reorder_conv2d_weight_out)); |
7563 | m.impl("mkldnn_reorder_conv3d_weight.out" , |
7564 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_reorder_conv3d_weight_out)); |
7565 | m.impl("quantize_per_tensor_dynamic.out" , |
7566 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_quantize_per_tensor_dynamic_out)); |
7567 | m.impl("quantize_per_tensor.out" , |
7568 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_quantize_per_tensor_out)); |
7569 | m.impl("quantize_per_tensor.tensor_qparams_out" , |
7570 | TORCH_FN(wrapper_CompositeExplicitAutograd_tensor_qparams_out_quantize_per_tensor_out)); |
7571 | m.impl("quantize_per_tensor.tensors_out" , |
7572 | TORCH_FN(wrapper_CompositeExplicitAutograd_tensors_out_quantize_per_tensor_out)); |
7573 | m.impl("quantize_per_channel.out" , |
7574 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_quantize_per_channel_out)); |
7575 | m.impl("dequantize.self_out" , |
7576 | TORCH_FN(wrapper_CompositeExplicitAutograd_self_out_dequantize_out)); |
7577 | m.impl("dequantize.tensors_out" , |
7578 | TORCH_FN(wrapper_CompositeExplicitAutograd_tensors_out_dequantize_out)); |
7579 | m.impl("q_per_channel_scales.out" , |
7580 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_q_per_channel_scales_out)); |
7581 | m.impl("q_per_channel_zero_points.out" , |
7582 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_q_per_channel_zero_points_out)); |
7583 | m.impl("int_repr.out" , |
7584 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_int_repr_out)); |
7585 | m.impl("_make_per_tensor_quantized_tensor.out" , |
7586 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__make_per_tensor_quantized_tensor_out)); |
7587 | m.impl("_make_per_channel_quantized_tensor.out" , |
7588 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__make_per_channel_quantized_tensor_out)); |
7589 | m.impl("fake_quantize_per_tensor_affine_cachemask.out" , |
7590 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_fake_quantize_per_tensor_affine_cachemask_out)); |
7591 | m.impl("_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out" , |
7592 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out)); |
7593 | m.impl("_fake_quantize_learnable_per_tensor_affine.out" , |
7594 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__fake_quantize_learnable_per_tensor_affine_out)); |
7595 | m.impl("fake_quantize_per_channel_affine_cachemask.out" , |
7596 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_fake_quantize_per_channel_affine_cachemask_out)); |
7597 | m.impl("_fake_quantize_learnable_per_channel_affine.out" , |
7598 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__fake_quantize_learnable_per_channel_affine_out)); |
7599 | m.impl("_fused_moving_avg_obs_fq_helper_functional" , |
7600 | TORCH_FN(wrapper_CompositeExplicitAutograd___fused_moving_avg_obs_fq_helper_functional)); |
7601 | m.impl("_fused_moving_avg_obs_fq_helper.out" , |
7602 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__fused_moving_avg_obs_fq_helper_out)); |
7603 | m.impl("_to_copy" , |
7604 | TORCH_FN(wrapper_CompositeExplicitAutograd___to_copy)); |
7605 | m.impl("_to_copy.out" , |
7606 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__to_copy_out)); |
7607 | m.impl("_lstm_mps.out" , |
7608 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__lstm_mps_out)); |
7609 | m.impl("lstm_mps_backward.out" , |
7610 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_lstm_mps_backward_out)); |
7611 | m.impl("_thnn_fused_lstm_cell.out" , |
7612 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__thnn_fused_lstm_cell_out)); |
7613 | m.impl("_thnn_fused_lstm_cell_backward_impl.out" , |
7614 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__thnn_fused_lstm_cell_backward_impl_out)); |
7615 | m.impl("_thnn_fused_gru_cell.out" , |
7616 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__thnn_fused_gru_cell_out)); |
7617 | m.impl("_thnn_fused_gru_cell_backward.out" , |
7618 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__thnn_fused_gru_cell_backward_out)); |
7619 | m.impl("_pack_padded_sequence" , |
7620 | TORCH_FN(wrapper_CompositeExplicitAutograd___pack_padded_sequence)); |
7621 | m.impl("_pack_padded_sequence.out" , |
7622 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__pack_padded_sequence_out)); |
7623 | m.impl("set.source_Storage" , |
7624 | TORCH_FN(wrapper_CompositeExplicitAutograd_source_Storage_set)); |
7625 | m.impl("set.source_Storage_out" , |
7626 | TORCH_FN(wrapper_CompositeExplicitAutograd_source_Storage_out_set_out)); |
7627 | m.impl("set.source_Storage_storage_offset" , |
7628 | TORCH_FN(wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_set)); |
7629 | m.impl("set.source_Storage_storage_offset_out" , |
7630 | TORCH_FN(wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_out_set_out)); |
7631 | m.impl("set.source_Tensor" , |
7632 | TORCH_FN(wrapper_CompositeExplicitAutograd_source_Tensor_set)); |
7633 | m.impl("set.source_Tensor_out" , |
7634 | TORCH_FN(wrapper_CompositeExplicitAutograd_source_Tensor_out_set_out)); |
7635 | m.impl("set" , |
7636 | TORCH_FN(wrapper_CompositeExplicitAutograd__set)); |
7637 | m.impl("set.out" , |
7638 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_set_out)); |
7639 | m.impl("lift" , |
7640 | TORCH_FN(wrapper_CompositeExplicitAutograd__lift)); |
7641 | m.impl("lift.out" , |
7642 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_lift_out)); |
7643 | m.impl("lift_fresh" , |
7644 | TORCH_FN(wrapper_CompositeExplicitAutograd__lift_fresh)); |
7645 | m.impl("lift_fresh_copy.out" , |
7646 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_lift_fresh_copy_out)); |
7647 | m.impl("masked_fill.Scalar" , |
7648 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_masked_fill)); |
7649 | m.impl("masked_fill.Scalar_out" , |
7650 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_masked_fill_out)); |
7651 | m.impl("masked_fill.Tensor" , |
7652 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_masked_fill)); |
7653 | m.impl("masked_fill.Tensor_out" , |
7654 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out_masked_fill_out)); |
7655 | m.impl("masked_scatter" , |
7656 | TORCH_FN(wrapper_CompositeExplicitAutograd__masked_scatter)); |
7657 | m.impl("masked_scatter.out" , |
7658 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_masked_scatter_out)); |
7659 | m.impl("_masked_softmax.out" , |
7660 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__masked_softmax_out)); |
7661 | m.impl("_masked_softmax_backward.out" , |
7662 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__masked_softmax_backward_out)); |
7663 | m.impl("view.dtype" , |
7664 | TORCH_FN(wrapper_CompositeExplicitAutograd_dtype_view)); |
7665 | m.impl("put" , |
7666 | TORCH_FN(wrapper_CompositeExplicitAutograd__put)); |
7667 | m.impl("put.out" , |
7668 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_put_out)); |
7669 | m.impl("index_fill.int_Scalar" , |
7670 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_Scalar_index_fill)); |
7671 | m.impl("index_fill.int_Scalar_out" , |
7672 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_Scalar_out_index_fill_out)); |
7673 | m.impl("index_fill.int_Tensor" , |
7674 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_Tensor_index_fill)); |
7675 | m.impl("index_fill.int_Tensor_out" , |
7676 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_Tensor_out_index_fill_out)); |
7677 | m.impl("bitwise_and.Scalar" , |
7678 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_bitwise_and)); |
7679 | m.impl("bitwise_and.Scalar_out" , |
7680 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_and_out)); |
7681 | m.impl("bitwise_and.Scalar_Tensor" , |
7682 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_and)); |
7683 | m.impl("bitwise_and.Scalar_Tensor_out" , |
7684 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_and_out)); |
7685 | m.impl("bitwise_or.Scalar_out" , |
7686 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_or_out)); |
7687 | m.impl("bitwise_or.Scalar_Tensor" , |
7688 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_or)); |
7689 | m.impl("bitwise_or.Scalar_Tensor_out" , |
7690 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_or_out)); |
7691 | m.impl("bitwise_xor.Scalar_out" , |
7692 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_xor_out)); |
7693 | m.impl("bitwise_xor.Scalar_Tensor" , |
7694 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_xor)); |
7695 | m.impl("bitwise_xor.Scalar_Tensor_out" , |
7696 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_xor_out)); |
7697 | m.impl("__lshift__.Scalar_out" , |
7698 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out___lshift___out)); |
7699 | m.impl("__lshift__.Tensor_out" , |
7700 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out___lshift___out)); |
7701 | m.impl("bitwise_left_shift.Tensor_Scalar" , |
7702 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_left_shift)); |
7703 | m.impl("bitwise_left_shift.Tensor_Scalar_out" , |
7704 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_Scalar_out_bitwise_left_shift_out)); |
7705 | m.impl("bitwise_left_shift_.Tensor_Scalar" , |
7706 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_left_shift_)); |
7707 | m.impl("bitwise_left_shift.Scalar_Tensor" , |
7708 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_left_shift)); |
7709 | m.impl("bitwise_left_shift.Scalar_Tensor_out" , |
7710 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_left_shift_out)); |
7711 | m.impl("__rshift__.Scalar_out" , |
7712 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out___rshift___out)); |
7713 | m.impl("__rshift__.Tensor_out" , |
7714 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out___rshift___out)); |
7715 | m.impl("bitwise_right_shift.Tensor_Scalar" , |
7716 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_right_shift)); |
7717 | m.impl("bitwise_right_shift.Tensor_Scalar_out" , |
7718 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_Scalar_out_bitwise_right_shift_out)); |
7719 | m.impl("bitwise_right_shift_.Tensor_Scalar" , |
7720 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_right_shift_)); |
7721 | m.impl("bitwise_right_shift.Scalar_Tensor" , |
7722 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_right_shift)); |
7723 | m.impl("bitwise_right_shift.Scalar_Tensor_out" , |
7724 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_right_shift_out)); |
7725 | m.impl("random.from" , |
7726 | TORCH_FN(wrapper_CompositeExplicitAutograd_from_random)); |
7727 | m.impl("random.from_out" , |
7728 | TORCH_FN(wrapper_CompositeExplicitAutograd_from_out_random_out)); |
7729 | m.impl("random.to" , |
7730 | TORCH_FN(wrapper_CompositeExplicitAutograd_to_random)); |
7731 | m.impl("random.to_out" , |
7732 | TORCH_FN(wrapper_CompositeExplicitAutograd_to_out_random_out)); |
7733 | m.impl("random" , |
7734 | TORCH_FN(wrapper_CompositeExplicitAutograd__random)); |
7735 | m.impl("random.out" , |
7736 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_random_out)); |
7737 | m.impl("uniform" , |
7738 | TORCH_FN(wrapper_CompositeExplicitAutograd__uniform)); |
7739 | m.impl("uniform.out" , |
7740 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_uniform_out)); |
7741 | m.impl("cauchy" , |
7742 | TORCH_FN(wrapper_CompositeExplicitAutograd__cauchy)); |
7743 | m.impl("cauchy.out" , |
7744 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cauchy_out)); |
7745 | m.impl("log_normal" , |
7746 | TORCH_FN(wrapper_CompositeExplicitAutograd__log_normal)); |
7747 | m.impl("log_normal.out" , |
7748 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_log_normal_out)); |
7749 | m.impl("exponential" , |
7750 | TORCH_FN(wrapper_CompositeExplicitAutograd__exponential)); |
7751 | m.impl("exponential.out" , |
7752 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_exponential_out)); |
7753 | m.impl("geometric" , |
7754 | TORCH_FN(wrapper_CompositeExplicitAutograd__geometric)); |
7755 | m.impl("geometric.out" , |
7756 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_geometric_out)); |
7757 | m.impl("tril_indices.out" , |
7758 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_tril_indices_out)); |
7759 | m.impl("triu_indices.out" , |
7760 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_triu_indices_out)); |
7761 | m.impl("trace.out" , |
7762 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_trace_out)); |
7763 | m.impl("_linalg_check_errors" , |
7764 | TORCH_FN(wrapper_CompositeExplicitAutograd___linalg_check_errors)); |
7765 | m.impl("cholesky_solve" , |
7766 | TORCH_FN(wrapper_CompositeExplicitAutograd__cholesky_solve)); |
7767 | m.impl("cholesky_solve.out" , |
7768 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_cholesky_solve_out)); |
7769 | m.impl("_cholesky_solve_helper.out" , |
7770 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__cholesky_solve_helper_out)); |
7771 | m.impl("polygamma_" , |
7772 | TORCH_FN(wrapper_CompositeExplicitAutograd__polygamma_)); |
7773 | m.impl("dist" , |
7774 | TORCH_FN(wrapper_CompositeExplicitAutograd__dist)); |
7775 | m.impl("dist.out" , |
7776 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_dist_out)); |
7777 | m.impl("_histogramdd_bin_edges.out" , |
7778 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__histogramdd_bin_edges_out)); |
7779 | m.impl("_histogramdd_from_bin_cts.out" , |
7780 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__histogramdd_from_bin_cts_out)); |
7781 | m.impl("_histogramdd_from_bin_tensors.out" , |
7782 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__histogramdd_from_bin_tensors_out)); |
7783 | m.impl("fmod.Scalar" , |
7784 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_fmod)); |
7785 | m.impl("fmod.Scalar_out" , |
7786 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_fmod_out)); |
7787 | m.impl("fmod_.Scalar" , |
7788 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_fmod_)); |
7789 | m.impl("remainder.Scalar" , |
7790 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_remainder)); |
7791 | m.impl("remainder.Scalar_out" , |
7792 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_remainder_out)); |
7793 | m.impl("remainder_.Scalar" , |
7794 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_remainder_)); |
7795 | m.impl("remainder.Scalar_Tensor_out" , |
7796 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_remainder_out)); |
7797 | m.impl("sort" , |
7798 | TORCH_FN(wrapper_CompositeExplicitAutograd__sort)); |
7799 | m.impl("sort.values" , |
7800 | TORCH_FN(wrapper_CompositeExplicitAutograd_values_sort_out)); |
7801 | m.impl("argsort.stable_out" , |
7802 | TORCH_FN(wrapper_CompositeExplicitAutograd_stable_out_argsort_out)); |
7803 | m.impl("unfold_backward.out" , |
7804 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_unfold_backward_out)); |
7805 | m.impl("normal_functional" , |
7806 | TORCH_FN(wrapper_CompositeExplicitAutograd__normal_functional)); |
7807 | m.impl("normal.out" , |
7808 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_normal_out)); |
7809 | m.impl("normal.float_float" , |
7810 | TORCH_FN(wrapper_CompositeExplicitAutograd_float_float_normal)); |
7811 | m.impl("normal.float_float_out" , |
7812 | TORCH_FN(wrapper_CompositeExplicitAutograd_float_float_out_normal_out)); |
7813 | m.impl("alias" , |
7814 | TORCH_FN(wrapper_CompositeExplicitAutograd__alias)); |
7815 | m.impl("_amp_foreach_non_finite_check_and_unscale" , |
7816 | TORCH_FN(wrapper_CompositeExplicitAutograd___amp_foreach_non_finite_check_and_unscale)); |
7817 | m.impl("_amp_foreach_non_finite_check_and_unscale.out" , |
7818 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__amp_foreach_non_finite_check_and_unscale_out)); |
7819 | m.impl("_amp_update_scale" , |
7820 | TORCH_FN(wrapper_CompositeExplicitAutograd___amp_update_scale)); |
7821 | m.impl("_amp_update_scale.out" , |
7822 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__amp_update_scale_out)); |
7823 | m.impl("_foreach_add.Scalar_out" , |
7824 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_add_out)); |
7825 | m.impl("_foreach_sub.Scalar_out" , |
7826 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_sub_out)); |
7827 | m.impl("_foreach_mul.Scalar_out" , |
7828 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_mul_out)); |
7829 | m.impl("_foreach_div.Scalar_out" , |
7830 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_div_out)); |
7831 | m.impl("_foreach_clamp_min.Scalar_out" , |
7832 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_clamp_min_out)); |
7833 | m.impl("_foreach_clamp_max.Scalar_out" , |
7834 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_clamp_max_out)); |
7835 | m.impl("_foreach_maximum.Scalar_out" , |
7836 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_maximum_out)); |
7837 | m.impl("_foreach_minimum.Scalar_out" , |
7838 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_minimum_out)); |
7839 | m.impl("_foreach_add.List_out" , |
7840 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_add_out)); |
7841 | m.impl("_foreach_sub.List_out" , |
7842 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_sub_out)); |
7843 | m.impl("_foreach_mul.List_out" , |
7844 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_mul_out)); |
7845 | m.impl("_foreach_div.List_out" , |
7846 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_div_out)); |
7847 | m.impl("_foreach_clamp_min.List_out" , |
7848 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_clamp_min_out)); |
7849 | m.impl("_foreach_clamp_max.List_out" , |
7850 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_clamp_max_out)); |
7851 | m.impl("_foreach_maximum.List_out" , |
7852 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_maximum_out)); |
7853 | m.impl("_foreach_minimum.List_out" , |
7854 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_minimum_out)); |
7855 | m.impl("_foreach_add.ScalarList_out" , |
7856 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_add_out)); |
7857 | m.impl("_foreach_sub.ScalarList_out" , |
7858 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_sub_out)); |
7859 | m.impl("_foreach_div.ScalarList_out" , |
7860 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_div_out)); |
7861 | m.impl("_foreach_mul.ScalarList_out" , |
7862 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_mul_out)); |
7863 | m.impl("_foreach_clamp_min.ScalarList_out" , |
7864 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_clamp_min_out)); |
7865 | m.impl("_foreach_clamp_max.ScalarList_out" , |
7866 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_clamp_max_out)); |
7867 | m.impl("_foreach_maximum.ScalarList_out" , |
7868 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_maximum_out)); |
7869 | m.impl("_foreach_minimum.ScalarList_out" , |
7870 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_minimum_out)); |
7871 | m.impl("_foreach_exp.out" , |
7872 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_exp_out)); |
7873 | m.impl("_foreach_zero" , |
7874 | TORCH_FN(wrapper_CompositeExplicitAutograd___foreach_zero)); |
7875 | m.impl("_foreach_zero.out" , |
7876 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_zero_out)); |
7877 | m.impl("_foreach_sqrt.out" , |
7878 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_sqrt_out)); |
7879 | m.impl("_foreach_abs.out" , |
7880 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_abs_out)); |
7881 | m.impl("_foreach_acos.out" , |
7882 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_acos_out)); |
7883 | m.impl("_foreach_asin.out" , |
7884 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_asin_out)); |
7885 | m.impl("_foreach_atan.out" , |
7886 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_atan_out)); |
7887 | m.impl("_foreach_ceil.out" , |
7888 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_ceil_out)); |
7889 | m.impl("_foreach_cos.out" , |
7890 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_cos_out)); |
7891 | m.impl("_foreach_cosh.out" , |
7892 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_cosh_out)); |
7893 | m.impl("_foreach_erf.out" , |
7894 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_erf_out)); |
7895 | m.impl("_foreach_erfc.out" , |
7896 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_erfc_out)); |
7897 | m.impl("_foreach_expm1.out" , |
7898 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_expm1_out)); |
7899 | m.impl("_foreach_floor.out" , |
7900 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_floor_out)); |
7901 | m.impl("_foreach_log.out" , |
7902 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_log_out)); |
7903 | m.impl("_foreach_log10.out" , |
7904 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_log10_out)); |
7905 | m.impl("_foreach_log1p.out" , |
7906 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_log1p_out)); |
7907 | m.impl("_foreach_log2.out" , |
7908 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_log2_out)); |
7909 | m.impl("_foreach_neg.out" , |
7910 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_neg_out)); |
7911 | m.impl("_foreach_tan.out" , |
7912 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_tan_out)); |
7913 | m.impl("_foreach_tanh.out" , |
7914 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_tanh_out)); |
7915 | m.impl("_foreach_sin.out" , |
7916 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_sin_out)); |
7917 | m.impl("_foreach_sinh.out" , |
7918 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_sinh_out)); |
7919 | m.impl("_foreach_round.out" , |
7920 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_round_out)); |
7921 | m.impl("_foreach_lgamma.out" , |
7922 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_lgamma_out)); |
7923 | m.impl("_foreach_frac.out" , |
7924 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_frac_out)); |
7925 | m.impl("_foreach_reciprocal.out" , |
7926 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_reciprocal_out)); |
7927 | m.impl("_foreach_sigmoid.out" , |
7928 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_sigmoid_out)); |
7929 | m.impl("_foreach_trunc.out" , |
7930 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foreach_trunc_out)); |
7931 | m.impl("_foreach_addcdiv.Scalar_out" , |
7932 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_addcdiv_out)); |
7933 | m.impl("_foreach_addcmul.Scalar_out" , |
7934 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_addcmul_out)); |
7935 | m.impl("_foreach_addcdiv.ScalarList_out" , |
7936 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_addcdiv_out)); |
7937 | m.impl("_foreach_addcdiv.Tensor_out" , |
7938 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out__foreach_addcdiv_out)); |
7939 | m.impl("_foreach_addcmul.ScalarList_out" , |
7940 | TORCH_FN(wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_addcmul_out)); |
7941 | m.impl("_foreach_addcmul.Tensor_out" , |
7942 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out__foreach_addcmul_out)); |
7943 | m.impl("_foreach_norm.Scalar_out" , |
7944 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_norm_out)); |
7945 | m.impl("_foreach_lerp.List_out" , |
7946 | TORCH_FN(wrapper_CompositeExplicitAutograd_List_out__foreach_lerp_out)); |
7947 | m.impl("_foreach_lerp.Scalar_out" , |
7948 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out__foreach_lerp_out)); |
7949 | m.impl("bucketize.Scalar_out" , |
7950 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_bucketize_out)); |
7951 | m.impl("searchsorted.Scalar_out" , |
7952 | TORCH_FN(wrapper_CompositeExplicitAutograd_Scalar_out_searchsorted_out)); |
7953 | m.impl("smooth_l1_loss_backward" , |
7954 | TORCH_FN(wrapper_CompositeExplicitAutograd__smooth_l1_loss_backward)); |
7955 | m.impl("huber_loss_backward" , |
7956 | TORCH_FN(wrapper_CompositeExplicitAutograd__huber_loss_backward)); |
7957 | m.impl("soft_margin_loss" , |
7958 | TORCH_FN(wrapper_CompositeExplicitAutograd__soft_margin_loss)); |
7959 | m.impl("soft_margin_loss.out" , |
7960 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_soft_margin_loss_out)); |
7961 | m.impl("soft_margin_loss_backward" , |
7962 | TORCH_FN(wrapper_CompositeExplicitAutograd__soft_margin_loss_backward)); |
7963 | m.impl("soft_margin_loss_backward.grad_input" , |
7964 | TORCH_FN(wrapper_CompositeExplicitAutograd_grad_input_soft_margin_loss_backward_out)); |
7965 | m.impl("glu_jvp.out" , |
7966 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_glu_jvp_out)); |
7967 | m.impl("glu_backward_jvp.out" , |
7968 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_glu_backward_jvp_out)); |
7969 | m.impl("hardswish_backward.out" , |
7970 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_hardswish_backward_out)); |
7971 | m.impl("rrelu_with_noise_backward" , |
7972 | TORCH_FN(wrapper_CompositeExplicitAutograd__rrelu_with_noise_backward)); |
7973 | m.impl("rrelu_with_noise_backward.out" , |
7974 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_rrelu_with_noise_backward_out)); |
7975 | m.impl("mkldnn_adaptive_avg_pool2d_backward.out" , |
7976 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_mkldnn_adaptive_avg_pool2d_backward_out)); |
7977 | m.impl("_adaptive_avg_pool2d.out" , |
7978 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_out)); |
7979 | m.impl("_adaptive_avg_pool2d_backward.out" , |
7980 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_backward_out)); |
7981 | m.impl("_adaptive_avg_pool3d.out" , |
7982 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_out)); |
7983 | m.impl("_adaptive_avg_pool3d_backward.out" , |
7984 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_backward_out)); |
7985 | m.impl("_slow_conv2d_backward.output_mask_out" , |
7986 | TORCH_FN(wrapper_CompositeExplicitAutograd_output_mask_out__slow_conv2d_backward_out)); |
7987 | m.impl("conv_depthwise3d.out" , |
7988 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_conv_depthwise3d_out)); |
7989 | m.impl("slow_conv_dilated2d.out" , |
7990 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_slow_conv_dilated2d_out)); |
7991 | m.impl("slow_conv_dilated3d.out" , |
7992 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_slow_conv_dilated3d_out)); |
7993 | m.impl("isinf" , |
7994 | TORCH_FN(wrapper_CompositeExplicitAutograd__isinf)); |
7995 | m.impl("isinf.out" , |
7996 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_isinf_out)); |
7997 | m.impl("special_xlog1py.self_scalar" , |
7998 | TORCH_FN(wrapper_CompositeExplicitAutograd_self_scalar_special_xlog1py)); |
7999 | m.impl("special_xlog1py.self_scalar_out" , |
8000 | TORCH_FN(wrapper_CompositeExplicitAutograd_self_scalar_out_special_xlog1py_out)); |
8001 | m.impl("special_xlog1py.other_scalar" , |
8002 | TORCH_FN(wrapper_CompositeExplicitAutograd_other_scalar_special_xlog1py)); |
8003 | m.impl("special_xlog1py.other_scalar_out" , |
8004 | TORCH_FN(wrapper_CompositeExplicitAutograd_other_scalar_out_special_xlog1py_out)); |
8005 | m.impl("special_zeta.self_scalar" , |
8006 | TORCH_FN(wrapper_CompositeExplicitAutograd_self_scalar_special_zeta)); |
8007 | m.impl("special_zeta.self_scalar_out" , |
8008 | TORCH_FN(wrapper_CompositeExplicitAutograd_self_scalar_out_special_zeta_out)); |
8009 | m.impl("special_zeta.other_scalar" , |
8010 | TORCH_FN(wrapper_CompositeExplicitAutograd_other_scalar_special_zeta)); |
8011 | m.impl("special_zeta.other_scalar_out" , |
8012 | TORCH_FN(wrapper_CompositeExplicitAutograd_other_scalar_out_special_zeta_out)); |
8013 | m.impl("fft_fftfreq" , |
8014 | TORCH_FN(wrapper_CompositeExplicitAutograd__fft_fftfreq)); |
8015 | m.impl("fft_fftfreq.out" , |
8016 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_fft_fftfreq_out)); |
8017 | m.impl("fft_rfftfreq" , |
8018 | TORCH_FN(wrapper_CompositeExplicitAutograd__fft_rfftfreq)); |
8019 | m.impl("fft_rfftfreq.out" , |
8020 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_fft_rfftfreq_out)); |
8021 | m.impl("linalg_lstsq" , |
8022 | TORCH_FN(wrapper_CompositeExplicitAutograd__linalg_lstsq)); |
8023 | m.impl("linalg_matrix_exp.out" , |
8024 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_linalg_matrix_exp_out)); |
8025 | m.impl("linalg_pinv.atol_rtol_tensor_out" , |
8026 | TORCH_FN(wrapper_CompositeExplicitAutograd_atol_rtol_tensor_out_linalg_pinv_out)); |
8027 | m.impl("_test_optional_intlist.out" , |
8028 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__test_optional_intlist_out)); |
8029 | m.impl("_test_optional_filled_intlist.out" , |
8030 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__test_optional_filled_intlist_out)); |
8031 | m.impl("_test_optional_floatlist.out" , |
8032 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__test_optional_floatlist_out)); |
8033 | m.impl("_test_warn_in_autograd" , |
8034 | TORCH_FN(wrapper_CompositeExplicitAutograd___test_warn_in_autograd)); |
8035 | m.impl("_test_warn_in_autograd.out" , |
8036 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__test_warn_in_autograd_out)); |
8037 | m.impl("_test_autograd_multiple_dispatch.fullcoverage" , |
8038 | TORCH_FN(wrapper_CompositeExplicitAutograd_fullcoverage__test_autograd_multiple_dispatch)); |
8039 | m.impl("_test_autograd_multiple_dispatch.fullcoverage_out" , |
8040 | TORCH_FN(wrapper_CompositeExplicitAutograd_fullcoverage_out__test_autograd_multiple_dispatch_out)); |
8041 | m.impl("_test_autograd_multiple_dispatch_view" , |
8042 | TORCH_FN(wrapper_CompositeExplicitAutograd___test_autograd_multiple_dispatch_view)); |
8043 | m.impl("_test_autograd_multiple_dispatch_view_copy.out" , |
8044 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__test_autograd_multiple_dispatch_view_copy_out)); |
8045 | m.impl("segment_reduce.out" , |
8046 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_segment_reduce_out)); |
8047 | m.impl("_segment_reduce_backward.out" , |
8048 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__segment_reduce_backward_out)); |
8049 | m.impl("_nested_tensor_from_tensor_list" , |
8050 | TORCH_FN(wrapper_CompositeExplicitAutograd___nested_tensor_from_tensor_list)); |
8051 | m.impl("_nested_tensor_from_tensor_list.out" , |
8052 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__nested_tensor_from_tensor_list_out)); |
8053 | m.impl("_fw_primal_copy.out" , |
8054 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__fw_primal_copy_out)); |
8055 | m.impl("_make_dual_copy.out" , |
8056 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__make_dual_copy_out)); |
8057 | m.impl("view_as_real_copy.out" , |
8058 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_view_as_real_copy_out)); |
8059 | m.impl("view_as_complex_copy.out" , |
8060 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_view_as_complex_copy_out)); |
8061 | m.impl("_conj_copy.out" , |
8062 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__conj_copy_out)); |
8063 | m.impl("_neg_view_copy.out" , |
8064 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__neg_view_copy_out)); |
8065 | m.impl("as_strided_copy.out" , |
8066 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_as_strided_copy_out)); |
8067 | m.impl("_sparse_broadcast_to_copy.out" , |
8068 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__sparse_broadcast_to_copy_out)); |
8069 | m.impl("diagonal_copy.out" , |
8070 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_diagonal_copy_out)); |
8071 | m.impl("expand_copy.out" , |
8072 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_expand_copy_out)); |
8073 | m.impl("permute_copy.out" , |
8074 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_permute_copy_out)); |
8075 | m.impl("_reshape_alias_copy.out" , |
8076 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__reshape_alias_copy_out)); |
8077 | m.impl("select_copy.int_out" , |
8078 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_out_select_copy_out)); |
8079 | m.impl("detach_copy.out" , |
8080 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_detach_copy_out)); |
8081 | m.impl("slice_copy.Tensor_out" , |
8082 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out_slice_copy_out)); |
8083 | m.impl("split_copy.Tensor_out" , |
8084 | TORCH_FN(wrapper_CompositeExplicitAutograd_Tensor_out_split_copy_out)); |
8085 | m.impl("split_with_sizes_copy.out" , |
8086 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_split_with_sizes_copy_out)); |
8087 | m.impl("squeeze_copy.out" , |
8088 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_squeeze_copy_out)); |
8089 | m.impl("squeeze_copy.dim_out" , |
8090 | TORCH_FN(wrapper_CompositeExplicitAutograd_dim_out_squeeze_copy_out)); |
8091 | m.impl("squeeze_copy.dims_out" , |
8092 | TORCH_FN(wrapper_CompositeExplicitAutograd_dims_out_squeeze_copy_out)); |
8093 | m.impl("t_copy.out" , |
8094 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_t_copy_out)); |
8095 | m.impl("transpose_copy.int_out" , |
8096 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_out_transpose_copy_out)); |
8097 | m.impl("unsqueeze_copy.out" , |
8098 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_unsqueeze_copy_out)); |
8099 | m.impl("_indices_copy.out" , |
8100 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__indices_copy_out)); |
8101 | m.impl("_values_copy.out" , |
8102 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__values_copy_out)); |
8103 | m.impl("indices_copy.out" , |
8104 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_indices_copy_out)); |
8105 | m.impl("values_copy.out" , |
8106 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_values_copy_out)); |
8107 | m.impl("crow_indices_copy.out" , |
8108 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_crow_indices_copy_out)); |
8109 | m.impl("col_indices_copy.out" , |
8110 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_col_indices_copy_out)); |
8111 | m.impl("ccol_indices_copy.out" , |
8112 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_ccol_indices_copy_out)); |
8113 | m.impl("row_indices_copy.out" , |
8114 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_row_indices_copy_out)); |
8115 | m.impl("unbind_copy.int_out" , |
8116 | TORCH_FN(wrapper_CompositeExplicitAutograd_int_out_unbind_copy_out)); |
8117 | m.impl("view_copy.out" , |
8118 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_view_copy_out)); |
8119 | m.impl("view_copy.dtype_out" , |
8120 | TORCH_FN(wrapper_CompositeExplicitAutograd_dtype_out_view_copy_out)); |
8121 | m.impl("unfold_copy.out" , |
8122 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_unfold_copy_out)); |
8123 | m.impl("alias_copy.out" , |
8124 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_alias_copy_out)); |
8125 | m.impl("to_padded_tensor.out" , |
8126 | TORCH_FN(wrapper_CompositeExplicitAutograd_out_to_padded_tensor_out)); |
8127 | m.impl("_transformer_encoder_layer_fwd.out" , |
8128 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__transformer_encoder_layer_fwd_out)); |
8129 | m.impl("_native_multi_head_attention.out" , |
8130 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__native_multi_head_attention_out)); |
8131 | m.impl("_triton_scaled_dot_attention.out" , |
8132 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__triton_scaled_dot_attention_out)); |
8133 | m.impl("_triton_multi_head_attention.out" , |
8134 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__triton_multi_head_attention_out)); |
8135 | m.impl("_transformer_decoder_only_layer_fwd.out" , |
8136 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__transformer_decoder_only_layer_fwd_out)); |
8137 | m.impl("_native_decoder_only_multi_head_attention.out" , |
8138 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__native_decoder_only_multi_head_attention_out)); |
8139 | m.impl("special_chebyshev_polynomial_t.n_scalar_out" , |
8140 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_t_out)); |
8141 | m.impl("special_chebyshev_polynomial_u.n_scalar_out" , |
8142 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_u_out)); |
8143 | m.impl("special_chebyshev_polynomial_v.n_scalar_out" , |
8144 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_v_out)); |
8145 | m.impl("special_chebyshev_polynomial_w.n_scalar_out" , |
8146 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_w_out)); |
8147 | m.impl("special_hermite_polynomial_h.n_scalar_out" , |
8148 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_hermite_polynomial_h_out)); |
8149 | m.impl("special_hermite_polynomial_he.n_scalar_out" , |
8150 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_hermite_polynomial_he_out)); |
8151 | m.impl("special_laguerre_polynomial_l.n_scalar_out" , |
8152 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_laguerre_polynomial_l_out)); |
8153 | m.impl("special_legendre_polynomial_p.n_scalar_out" , |
8154 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_legendre_polynomial_p_out)); |
8155 | m.impl("special_shifted_chebyshev_polynomial_t.n_scalar_out" , |
8156 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_t_out)); |
8157 | m.impl("special_shifted_chebyshev_polynomial_u.n_scalar_out" , |
8158 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_u_out)); |
8159 | m.impl("special_shifted_chebyshev_polynomial_v.n_scalar_out" , |
8160 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_v_out)); |
8161 | m.impl("special_shifted_chebyshev_polynomial_w.n_scalar_out" , |
8162 | TORCH_FN(wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_w_out)); |
8163 | m.impl("_foobar.out" , |
8164 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__foobar_out)); |
8165 | m.impl("_fused_adam" , |
8166 | TORCH_FN(wrapper_CompositeExplicitAutograd___fused_adam)); |
8167 | m.impl("_fused_adam.out" , |
8168 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__fused_adam_out)); |
8169 | m.impl("_fused_adamw" , |
8170 | TORCH_FN(wrapper_CompositeExplicitAutograd___fused_adamw)); |
8171 | m.impl("_fused_adamw.out" , |
8172 | TORCH_FN(wrapper_CompositeExplicitAutograd_out__fused_adamw_out)); |
8173 | }; |
8174 | } // anonymous namespace |
8175 | namespace compositeexplicitautograd { |
// Public CompositeExplicitAutograd entry points for forward-mode-AD helper ops.
// Every function in this namespace is a thin generated shim: it forwards its
// arguments to the file-local wrapper_CompositeExplicitAutograd_* function for
// the same operator and contains no logic of its own.
at::Tensor _fw_primal(const at::Tensor & self, int64_t level) {
    return wrapper_CompositeExplicitAutograd___fw_primal(self, level);
}
at::Tensor _make_dual(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    return wrapper_CompositeExplicitAutograd___make_dual(primal, tangent, level);
}
at::Tensor _new_zeros_with_same_feature_meta(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
    return wrapper_CompositeExplicitAutograd___new_zeros_with_same_feature_meta(self, other, self_num_batch_dims);
}
// `_out` (out parameter first) and `_outf` (out parameter last) are the two
// generated calling conventions for the same out-variant; both forward to the
// identical wrapper with `out` moved to the tail position.
at::Tensor & _new_zeros_with_same_feature_meta_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
    return wrapper_CompositeExplicitAutograd_out__new_zeros_with_same_feature_meta_out(self, other, self_num_batch_dims, out);
}
at::Tensor & _new_zeros_with_same_feature_meta_outf(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__new_zeros_with_same_feature_meta_out(self, other, self_num_batch_dims, out);
}
bool _has_same_storage_numel(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeExplicitAutograd___has_same_storage_numel(self, other);
}
// Generated out-variant shims for the cuDNN-backed operators. As above, each
// `_out`/`_outf` pair forwards to one shared wrapper with the out tensors at
// the tail. The `_symint_*` overloads take c10::SymInt sizes and pass them
// straight through; the plain int64_t/IntArrayRef overloads forward to the
// same SymInt-based wrapper (IntArrayRef arguments are converted explicitly
// via c10::fromIntArrayRefSlow where the wrapper expects SymIntArrayRef).
::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_ctc_loss_out(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_ctc_loss_out(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
}
at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_flatten_weight_out(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}
at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_flatten_weight_out(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}
at::Tensor & _cudnn_rnn_flatten_weight_symint_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_flatten_weight_out(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}
at::Tensor & _cudnn_rnn_flatten_weight_symint_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_flatten_weight_out(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_out(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, out0, out1, out2, out3, out4);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_out(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, out0, out1, out2, out3, out4);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_out(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_symint_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_out(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}
void _cudnn_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_backward_out(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
}
void _cudnn_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_backward_out(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
}
void _cudnn_rnn_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_backward_out(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}
void _cudnn_rnn_backward_symint_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_rnn_backward_out(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}
at::Tensor & _cudnn_init_dropout_state_out(at::Tensor & out, double dropout, bool train, int64_t dropout_seed) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_init_dropout_state_out(dropout, train, dropout_seed, out);
}
at::Tensor & _cudnn_init_dropout_state_outf(double dropout, bool train, int64_t dropout_seed, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__cudnn_init_dropout_state_out(dropout, train, dropout_seed, out);
}
// Generated shims for the dropout family; each `_out`/`_outf` pair forwards
// to one shared wrapper with the out tensors moved to the tail.
::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd_out__fused_dropout_out(self, p, generator, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_outf(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) {
    return wrapper_CompositeExplicitAutograd_out__fused_dropout_out(self, p, generator, out0, out1);
}
at::Tensor & _masked_scale_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, double scale) {
    return wrapper_CompositeExplicitAutograd_out__masked_scale_out(self, mask, scale, out);
}
at::Tensor & _masked_scale_outf(const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__masked_scale_out(self, mask, scale, out);
}
::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double p, c10::optional<bool> train) {
    return wrapper_CompositeExplicitAutograd_out_native_dropout_out(input, p, train, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> native_dropout_outf(const at::Tensor & input, double p, c10::optional<bool> train, at::Tensor & out0, at::Tensor & out1) {
    return wrapper_CompositeExplicitAutograd_out_native_dropout_out(input, p, train, out0, out1);
}
at::Tensor & native_dropout_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
    return wrapper_CompositeExplicitAutograd_out_native_dropout_backward_out(grad_output, mask, scale, out);
}
at::Tensor & native_dropout_backward_outf(const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_native_dropout_backward_out(grad_output, mask, scale, out);
}
// Generated shims for abs and the conjugate/negation view helpers. Trailing
// underscore names (abs_, conj_physical_) are the in-place variants and
// forward to the corresponding in-place wrapper.
at::Tensor abs(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__abs(self);
}
at::Tensor & abs_(at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__abs_(self);
}
at::Tensor _conj(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd___conj(self);
}
at::Tensor _conj_physical(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd___conj_physical(self);
}
at::Tensor & _conj_physical_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd_out__conj_physical_out(self, out);
}
at::Tensor & _conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__conj_physical_out(self, out);
}
at::Tensor & conj_physical_(at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__conj_physical_(self);
}
at::Tensor _neg_view(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd___neg_view(self);
}
// Generated shims for _add_relu/add (Scalar overloads), addr, and
// affine_grid_generator. The wrapper-name infix (e.g. `Scalar`, `Scalar_out`)
// identifies which schema overload each shim targets.
at::Tensor & _add_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd_Scalar_out__add_relu_out(self, other, alpha, out);
}
at::Tensor & _add_relu_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_Scalar_out__add_relu_out(self, other, alpha, out);
}
at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd_Scalar_add(self, other, alpha);
}
at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd_Scalar_out_add_out(self, other, alpha, out);
}
at::Tensor & add_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_Scalar_out_add_out(self, other, alpha, out);
}
at::Tensor & add_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd_Scalar_add_(self, other, alpha);
}
at::Tensor addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd__addr(self, vec1, vec2, beta, alpha);
}
at::Tensor & addr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd_out_addr_out(self, vec1, vec2, beta, alpha, out);
}
at::Tensor & addr_outf(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_addr_out(self, vec1, vec2, beta, alpha, out);
}
at::Tensor & addr_(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd__addr_(self, vec1, vec2, beta, alpha);
}
at::Tensor affine_grid_generator(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
    return wrapper_CompositeExplicitAutograd__affine_grid_generator(theta, size, align_corners);
}
at::Tensor & affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
    return wrapper_CompositeExplicitAutograd_out_affine_grid_generator_out(theta, size, align_corners, out);
}
at::Tensor & affine_grid_generator_outf(const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_affine_grid_generator_out(theta, size, align_corners, out);
}
// Generated shims for the boolean reduction helpers. Note allclose returns a
// plain bool rather than a Tensor.
at::Tensor _is_all_true(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd___is_all_true(self);
}
at::Tensor _is_any_true(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd___is_any_true(self);
}
bool allclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    return wrapper_CompositeExplicitAutograd__allclose(self, other, rtol, atol, equal_nan);
}
// Generated factory shims for arange. Each schema overload comes in two
// forms: one taking at::TensorOptions, which is unpacked here into the four
// optional components (dtype via optTypeMetaToScalarType, layout, device,
// pinned memory) before calling the wrapper, and one taking those optionals
// directly.
at::Tensor arange(const at::Scalar & end, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__arange(end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor arange(const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__arange(end, dtype, layout, device, pin_memory);
}
at::Tensor & arange_out(at::Tensor & out, const at::Scalar & end) {
    return wrapper_CompositeExplicitAutograd_out_arange_out(end, out);
}
at::Tensor & arange_outf(const at::Scalar & end, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_arange_out(end, out);
}
at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_start_arange(start, end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor arange(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_start_arange(start, end, dtype, layout, device, pin_memory);
}
at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_start_step_arange(start, end, step, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_start_step_arange(start, end, step, dtype, layout, device, pin_memory);
}
// Generated factory shims for bartlett_window: base and `periodic` schema
// overloads, each with a TensorOptions form (unpacked into optional
// dtype/layout/device/pin-memory components here) and an already-unpacked
// form, plus the matching out variants.
at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__bartlett_window(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor bartlett_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__bartlett_window(window_length, dtype, layout, device, pin_memory);
}
at::Tensor & bartlett_window_out(at::Tensor & out, int64_t window_length) {
    return wrapper_CompositeExplicitAutograd_out_bartlett_window_out(window_length, out);
}
at::Tensor & bartlett_window_outf(int64_t window_length, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_bartlett_window_out(window_length, out);
}
at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_periodic_bartlett_window(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor bartlett_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_periodic_bartlett_window(window_length, periodic, dtype, layout, device, pin_memory);
}
at::Tensor & bartlett_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
    return wrapper_CompositeExplicitAutograd_periodic_out_bartlett_window_out(window_length, periodic, out);
}
at::Tensor & bartlett_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_periodic_out_bartlett_window_out(window_length, periodic, out);
}
// Generated shims for quantized_batch_norm, bernoulli (default, Tensor-p and
// float-p overloads), binary_cross_entropy_with_logits, and bincount. The
// bernoulli overload set is disambiguated by the wrapper-name infix
// (`Tensor`, `Tensor_out`, `float_out`).
at::Tensor & quantized_batch_norm_out(at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
    return wrapper_CompositeExplicitAutograd_out_quantized_batch_norm_out(input, weight, bias, mean, var, eps, output_scale, output_zero_point, out);
}
at::Tensor & quantized_batch_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_quantized_batch_norm_out(input, weight, bias, mean, var, eps, output_scale, output_zero_point, out);
}
at::Tensor bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd__bernoulli(self, generator);
}
at::Tensor bernoulli(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd_Tensor_bernoulli(self, p, generator);
}
at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd_Tensor_out_bernoulli_out(self, p, generator, out);
}
at::Tensor & bernoulli_outf(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_Tensor_out_bernoulli_out(self, p, generator, out);
}
at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd_float_out_bernoulli_out(self, p, generator, out);
}
at::Tensor & bernoulli_outf(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_float_out_bernoulli_out(self, p, generator, out);
}
at::Tensor binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction) {
    return wrapper_CompositeExplicitAutograd__binary_cross_entropy_with_logits(self, target, weight, pos_weight, reduction);
}
at::Tensor & binary_cross_entropy_with_logits_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction) {
    return wrapper_CompositeExplicitAutograd_out_binary_cross_entropy_with_logits_out(self, target, weight, pos_weight, reduction, out);
}
at::Tensor & binary_cross_entropy_with_logits_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_binary_cross_entropy_with_logits_out(self, target, weight, pos_weight, reduction, out);
}
at::Tensor & bincount_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength) {
    return wrapper_CompositeExplicitAutograd_out_bincount_out(self, weights, minlength, out);
}
at::Tensor & bincount_outf(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_bincount_out(self, weights, minlength, out);
}
// aten::copysign.Scalar shims: functional, out (both spellings), and in-place
// variants, all forwarding to the Scalar-overload wrappers.
at::Tensor copysign(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_copysign(self, other);
}
at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out_copysign_out(self, other, out);
}
at::Tensor & copysign_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_copysign_out(self, other, out);
}
at::Tensor & copysign_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_copysign_(self, other);
}
// Elementwise logical op shims (logical_not/xor/and/or): functional and
// in-place (`_`-suffixed) variants forwarding to their respective wrappers.
at::Tensor logical_not(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__logical_not(self);
}
at::Tensor & logical_not_(at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__logical_not_(self);
}
at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd__logical_xor(self, other);
}
at::Tensor & logical_xor_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd__logical_xor_(self, other);
}
at::Tensor logical_and(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd__logical_and(self, other);
}
at::Tensor & logical_and_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd__logical_and_(self, other);
}
at::Tensor logical_or(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd__logical_or(self, other);
}
at::Tensor & logical_or_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd__logical_or_(self, other);
}
// aten::blackman_window shims, base and `.periodic` overloads. The
// TensorOptions overloads unpack `options` into the four optional components
// (dtype/layout/device/pin_memory) expected by the wrapper; the "scattered"
// overloads pass those components through directly.
at::Tensor blackman_window(int64_t window_length, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__blackman_window(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor blackman_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__blackman_window(window_length, dtype, layout, device, pin_memory);
}
at::Tensor & blackman_window_out(at::Tensor & out, int64_t window_length) {
return wrapper_CompositeExplicitAutograd_out_blackman_window_out(window_length, out);
}
at::Tensor & blackman_window_outf(int64_t window_length, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_blackman_window_out(window_length, out);
}
at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_periodic_blackman_window(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor blackman_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_periodic_blackman_window(window_length, periodic, dtype, layout, device, pin_memory);
}
at::Tensor & blackman_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
return wrapper_CompositeExplicitAutograd_periodic_out_blackman_window_out(window_length, periodic, out);
}
at::Tensor & blackman_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_periodic_out_blackman_window_out(window_length, periodic, out);
}
// aten::block_diag shims: functional plus both out-variant spellings.
at::Tensor block_diag(at::TensorList tensors) {
return wrapper_CompositeExplicitAutograd__block_diag(tensors);
}
at::Tensor & block_diag_out(at::Tensor & out, at::TensorList tensors) {
return wrapper_CompositeExplicitAutograd_out_block_diag_out(tensors, out);
}
at::Tensor & block_diag_outf(at::TensorList tensors, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_block_diag_out(tensors, out);
}
// aten::complex / aten::polar functional shims (construct a complex tensor
// from real/imag or abs/angle pairs via their wrappers).
at::Tensor complex(const at::Tensor & real, const at::Tensor & imag) {
return wrapper_CompositeExplicitAutograd__complex(real, imag);
}
at::Tensor polar(const at::Tensor & abs, const at::Tensor & angle) {
return wrapper_CompositeExplicitAutograd__polar(abs, angle);
}
// aten::constant_pad_nd shims. The wrapper takes SymInt sizes, so the plain
// IntArrayRef overloads convert via c10::fromIntArrayRefSlow, while the
// `_symint` overloads pass the SymIntArrayRef straight through.
at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd__constant_pad_nd(self, c10::fromIntArrayRefSlow(pad), value);
}
at::Tensor constant_pad_nd_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd__constant_pad_nd(self, pad, value);
}
at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_out_constant_pad_nd_out(self, c10::fromIntArrayRefSlow(pad), value, out);
}
at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_constant_pad_nd_out(self, c10::fromIntArrayRefSlow(pad), value, out);
}
at::Tensor & constant_pad_nd_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_out_constant_pad_nd_out(self, pad, value, out);
}
at::Tensor & constant_pad_nd_symint_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_constant_pad_nd_out(self, pad, value, out);
}
// aten::convolution shims. Only `padding`/`output_padding` are SymInt in the
// wrapper signature (converted with fromIntArrayRefSlow in the int overloads);
// `stride` and `dilation` stay plain IntArrayRef in every variant.
at::Tensor convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
return wrapper_CompositeExplicitAutograd__convolution(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups);
}
at::Tensor convolution_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
return wrapper_CompositeExplicitAutograd__convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}
at::Tensor & convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out_convolution_out(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
}
at::Tensor & convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_convolution_out(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
}
at::Tensor & convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out_convolution_out(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}
at::Tensor & convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_convolution_out(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}
// aten::convolution_backward shims (returns grad_input/grad_weight/grad_bias,
// gated by `output_mask`). The optional `bias_sizes` needs an element-wise
// Int->SymInt conversion in the int overloads, hence the has_value() ternary.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd__convolution_backward(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_symint(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd__convolution_backward(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_convolution_backward_out(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_convolution_backward_out(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_convolution_backward_out(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_convolution_backward_out(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}
// aten::convolution_overrideable shims (backend-overrideable convolution entry
// point; all-int signature, no SymInt conversion needed).
at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
return wrapper_CompositeExplicitAutograd__convolution_overrideable(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}
at::Tensor & convolution_overrideable_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out_convolution_overrideable_out(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}
at::Tensor & convolution_overrideable_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_convolution_overrideable_out(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}
// aten::convolution_backward_overrideable shims: functional plus both
// three-output out-variant spellings (out0/out1/out2 first vs. last).
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd__convolution_backward_overrideable(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_convolution_backward_overrideable_out(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_convolution_backward_overrideable_out(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}
// aten::_convolution shims (the internal variant carrying the extra
// benchmark/deterministic/cudnn_enabled/allow_tf32 flags). As with
// aten::convolution, only padding/output_padding are SymInt in the wrapper.
at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
return wrapper_CompositeExplicitAutograd___convolution(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
}
at::Tensor _convolution_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
return wrapper_CompositeExplicitAutograd___convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
}
at::Tensor & _convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
return wrapper_CompositeExplicitAutograd_out__convolution_out(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}
at::Tensor & _convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__convolution_out(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}
at::Tensor & _convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
return wrapper_CompositeExplicitAutograd_out__convolution_out(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}
at::Tensor & _convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__convolution_out(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}
// aten::conv_tbc shims (time-batch-channel layout convolution entry points).
at::Tensor conv_tbc(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
return wrapper_CompositeExplicitAutograd__conv_tbc(self, weight, bias, pad);
}
at::Tensor & conv_tbc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
return wrapper_CompositeExplicitAutograd_out_conv_tbc_out(self, weight, bias, pad, out);
}
at::Tensor & conv_tbc_outf(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_conv_tbc_out(self, weight, bias, pad, out);
}
// aten::copy shims: out variants plus the in-place copy_ (copies `src` into
// `self`, with a non-blocking flag forwarded to the wrapper).
at::Tensor & copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
return wrapper_CompositeExplicitAutograd_out_copy_out(self, src, non_blocking, out);
}
at::Tensor & copy_outf(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_copy_out(self, src, non_blocking, out);
}
at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
return wrapper_CompositeExplicitAutograd__copy_(self, src, non_blocking);
}
// aten::_copy_from / aten::_copy_from_and_resize out-variant shims (backend
// interchange copy hooks; see the _copy_from* includes in the file header).
at::Tensor & _copy_from_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
return wrapper_CompositeExplicitAutograd_out__copy_from_out(self, dst, non_blocking, out);
}
at::Tensor & _copy_from_outf(const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__copy_from_out(self, dst, non_blocking, out);
}
at::Tensor & _copy_from_and_resize_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & dst) {
return wrapper_CompositeExplicitAutograd_out__copy_from_and_resize_out(self, dst, out);
}
at::Tensor & _copy_from_and_resize_outf(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__copy_from_and_resize_out(self, dst, out);
}
// aten::count_nonzero shims. Two overload families routed by dim type:
// `.dim_IntList` (IntArrayRef dim) and the base overload
// (c10::optional<int64_t> dim); each forwards to its own wrapper.
at::Tensor & count_nonzero_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) {
return wrapper_CompositeExplicitAutograd_dim_IntList_out_count_nonzero_out(self, dim, out);
}
at::Tensor & count_nonzero_outf(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_dim_IntList_out_count_nonzero_out(self, dim, out);
}
at::Tensor count_nonzero(const at::Tensor & self, c10::optional<int64_t> dim) {
return wrapper_CompositeExplicitAutograd__count_nonzero(self, dim);
}
at::Tensor & count_nonzero_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim) {
return wrapper_CompositeExplicitAutograd_out_count_nonzero_out(self, dim, out);
}
at::Tensor & count_nonzero_outf(const at::Tensor & self, c10::optional<int64_t> dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_count_nonzero_out(self, dim, out);
}
// aten::cudnn_affine_grid_generator (+_backward) out-variant shims.
at::Tensor & cudnn_affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
return wrapper_CompositeExplicitAutograd_out_cudnn_affine_grid_generator_out(theta, N, C, H, W, out);
}
at::Tensor & cudnn_affine_grid_generator_outf(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_cudnn_affine_grid_generator_out(theta, N, C, H, W, out);
}
at::Tensor & cudnn_affine_grid_generator_backward_out(at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
return wrapper_CompositeExplicitAutograd_out_cudnn_affine_grid_generator_backward_out(grad, N, C, H, W, out);
}
at::Tensor & cudnn_affine_grid_generator_backward_outf(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_cudnn_affine_grid_generator_backward_out(grad, N, C, H, W, out);
}
// aten::cudnn_batch_norm (four outputs) and aten::cudnn_batch_norm_backward
// (three outputs) out-variant shims.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
return wrapper_CompositeExplicitAutograd_out_cudnn_batch_norm_out(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
return wrapper_CompositeExplicitAutograd_out_cudnn_batch_norm_out(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
return wrapper_CompositeExplicitAutograd_out_cudnn_batch_norm_backward_out(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_outf(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_cudnn_batch_norm_backward_out(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
}
// aten::cudnn_convolution and aten::cudnn_convolution_transpose out-variant
// shims (note: only out variants appear in this group).
at::Tensor & cudnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
return wrapper_CompositeExplicitAutograd_out_cudnn_convolution_out(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}
at::Tensor & cudnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_cudnn_convolution_out(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}
at::Tensor & cudnn_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
return wrapper_CompositeExplicitAutograd_out_cudnn_convolution_transpose_out(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}
at::Tensor & cudnn_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_cudnn_convolution_transpose_out(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}
// aten::_mps_convolution_transpose and aten::mps_convolution_transpose_backward
// out-variant shims.
at::Tensor & _mps_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out__mps_convolution_transpose_out(self, weight, padding, output_padding, stride, dilation, groups, out);
}
at::Tensor & _mps_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__mps_convolution_transpose_out(self, weight, padding, output_padding, stride, dilation, groups, out);
}
::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {
return wrapper_CompositeExplicitAutograd_out_mps_convolution_transpose_backward_out(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_mps_convolution_transpose_backward_out(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
}
// aten::cudnn_convolution_relu and aten::cudnn_convolution_add_relu
// out-variant shims (fused conv+activation entry points).
at::Tensor & cudnn_convolution_relu_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out_cudnn_convolution_relu_out(self, weight, bias, stride, padding, dilation, groups, out);
}
at::Tensor & cudnn_convolution_relu_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_cudnn_convolution_relu_out(self, weight, bias, stride, padding, dilation, groups, out);
}
at::Tensor & cudnn_convolution_add_relu_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out_cudnn_convolution_add_relu_out(self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
}
at::Tensor & cudnn_convolution_add_relu_outf(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_cudnn_convolution_add_relu_out(self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
}
// Generated redispatch stubs for aten::cudnn_grid_sampler.out and its
// backward. Forward returns one tensor; backward writes two (out0, out1).
at::Tensor & cudnn_grid_sampler_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & grid) {
    return wrapper_CompositeExplicitAutograd_out_cudnn_grid_sampler_out(self, grid, out);
}
at::Tensor & cudnn_grid_sampler_outf(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_cudnn_grid_sampler_out(self, grid, out);
}
::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
    return wrapper_CompositeExplicitAutograd_out_cudnn_grid_sampler_backward_out(self, grid, grad_output, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_outf(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
    return wrapper_CompositeExplicitAutograd_out_cudnn_grid_sampler_backward_out(self, grid, grad_output, out0, out1);
}
// Generated redispatch stubs for aten::cummax / aten::cummin (functional and
// `values`/`indices` out variants). Each pair of out/outf overloads forwards
// to the same wrapper; the functional form returns (values, indices) by value.
::std::tuple<at::Tensor,at::Tensor> cummax(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutograd__cummax(self, dim);
}
::std::tuple<at::Tensor &,at::Tensor &> cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_out_cummax_out(self, dim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> cummax_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeExplicitAutograd_out_cummax_out(self, dim, values, indices);
}
::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutograd__cummin(self, dim);
}
::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_out_cummin_out(self, dim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeExplicitAutograd_out_cummin_out(self, dim, values, indices);
}
// Generated redispatch stubs for aten::_ctc_loss.out. Two schema overloads
// exist: one taking `input_lengths`/`target_lengths` as IntArrayRef, and a
// `.Tensor` overload taking them as tensors (note the distinct wrappers:
// `..._out__ctc_loss_out` vs `..._Tensor_out__ctc_loss_out`). The backward
// stubs forward to the single-output backward wrapper.
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
    return wrapper_CompositeExplicitAutograd_out__ctc_loss_out(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    return wrapper_CompositeExplicitAutograd_out__ctc_loss_out(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
    return wrapper_CompositeExplicitAutograd_Tensor_out__ctc_loss_out(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    return wrapper_CompositeExplicitAutograd_Tensor_out__ctc_loss_out(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}
at::Tensor & _ctc_loss_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    return wrapper_CompositeExplicitAutograd_out__ctc_loss_backward_out(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
}
at::Tensor & _ctc_loss_backward_outf(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__ctc_loss_backward_out(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
}
// Generated redispatch stubs for aten::diag_embed.out, aten::diagonal and
// aten::diagonal_backward. The shared backward wrapper is SymInt-typed:
// plain-int overloads widen `input_sizes` via c10::fromIntArrayRefSlow, while
// the `_symint` overloads pass the SymIntArrayRef through unchanged.
at::Tensor & diag_embed_out(at::Tensor & out, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutograd_out_diag_embed_out(self, offset, dim1, dim2, out);
}
at::Tensor & diag_embed_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_diag_embed_out(self, offset, dim1, dim2, out);
}
at::Tensor diagonal(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutograd__diagonal(self, offset, dim1, dim2);
}
at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutograd__diagonal_backward(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
}
at::Tensor diagonal_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutograd__diagonal_backward(grad_output, input_sizes, offset, dim1, dim2);
}
at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutograd_out_diagonal_backward_out(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
}
at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_diagonal_backward_out(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
}
at::Tensor & diagonal_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutograd_out_diagonal_backward_out(grad_output, input_sizes, offset, dim1, dim2, out);
}
at::Tensor & diagonal_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_diagonal_backward_out(grad_output, input_sizes, offset, dim1, dim2, out);
}
// Generated redispatch stubs for the Scalar overloads of aten::div:
// div.Scalar (functional / out / in-place) and div.Scalar_mode, which adds an
// optional `rounding_mode` string. Each stub forwards to the matching
// CompositeExplicitAutograd wrapper; `div_` mutates and returns `self`.
at::Tensor div(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeExplicitAutograd_Scalar_div(self, other);
}
at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeExplicitAutograd_Scalar_out_div_out(self, other, out);
}
at::Tensor & div_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_Scalar_out_div_out(self, other, out);
}
at::Tensor & div_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeExplicitAutograd_Scalar_div_(self, other);
}
at::Tensor div(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeExplicitAutograd_Scalar_mode_div(self, other, rounding_mode);
}
at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeExplicitAutograd_Scalar_mode_out_div_out(self, other, rounding_mode, out);
}
at::Tensor & div_outf(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_Scalar_mode_out_div_out(self, other, rounding_mode, out);
}
at::Tensor & div_(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeExplicitAutograd_Scalar_mode_div_(self, other, rounding_mode);
}
// Generated redispatch stubs for aten::dot.out and aten::vdot.out.
at::Tensor & dot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor) {
    return wrapper_CompositeExplicitAutograd_out_dot_out(self, tensor, out);
}
at::Tensor & dot_outf(const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_dot_out(self, tensor, out);
}
at::Tensor & vdot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeExplicitAutograd_out_vdot_out(self, other, out);
}
at::Tensor & vdot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_vdot_out(self, other, out);
}
// Generated redispatch stubs for the aten::embedding family: embedding,
// embedding_dense_backward and embedding_renorm. Plain-int and `_symint`
// overloads of each entry point forward to the same SymInt-typed wrapper
// (an int64_t `padding_idx`/`num_weights` converts implicitly to c10::SymInt).
at::Tensor embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
    return wrapper_CompositeExplicitAutograd__embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse);
}
at::Tensor embedding_symint(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
    return wrapper_CompositeExplicitAutograd__embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse);
}
at::Tensor & embedding_out(at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
    return wrapper_CompositeExplicitAutograd_out_embedding_out(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}
at::Tensor & embedding_outf(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_embedding_out(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}
at::Tensor & embedding_symint_out(at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
    return wrapper_CompositeExplicitAutograd_out_embedding_out(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}
at::Tensor & embedding_symint_outf(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_embedding_out(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}
at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
    return wrapper_CompositeExplicitAutograd_out_embedding_dense_backward_out(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}
at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_embedding_dense_backward_out(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}
at::Tensor & embedding_dense_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
    return wrapper_CompositeExplicitAutograd_out_embedding_dense_backward_out(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}
at::Tensor & embedding_dense_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_embedding_dense_backward_out(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}
// Functional (non-mutating) variant of embedding_renorm_ plus its out form.
at::Tensor embedding_renorm(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    return wrapper_CompositeExplicitAutograd__embedding_renorm(self, indices, max_norm, norm_type);
}
at::Tensor & embedding_renorm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    return wrapper_CompositeExplicitAutograd_out_embedding_renorm_out(self, indices, max_norm, norm_type, out);
}
at::Tensor & embedding_renorm_outf(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_embedding_renorm_out(self, indices, max_norm, norm_type, out);
}
// Generated redispatch stubs for the aten::_embedding_bag family. The forward
// variants produce four outputs (out0..out3); the dense-backward variants have
// both int64_t and `_symint` overloads of `num_weights` forwarding to one
// SymInt-typed wrapper; the per-sample-weights backward has a single output.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_forward_only_out(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_outf(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_forward_only_out(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_out(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_outf(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_out(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}
at::Tensor & _embedding_bag_dense_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_dense_backward_out(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}
at::Tensor & _embedding_bag_dense_backward_outf(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_dense_backward_out(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}
at::Tensor & _embedding_bag_dense_backward_symint_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_dense_backward_out(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}
at::Tensor & _embedding_bag_dense_backward_symint_outf(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_dense_backward_out(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}
at::Tensor & _embedding_bag_per_sample_weights_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_per_sample_weights_backward_out(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
}
at::Tensor & _embedding_bag_per_sample_weights_backward_outf(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__embedding_bag_per_sample_weights_backward_out(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
}
// Generated redispatch stubs for aten::empty.names. The TensorOptions
// overload unpacks `options` into its optional components (dtype, layout,
// device, pin_memory) and validates `memory_format` against the options via
// check_tensor_options_and_extract_memory_format; the exploded overload
// passes the components straight through.
at::Tensor empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_names_empty(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_names_empty(size, names, dtype, layout, device, pin_memory, memory_format);
}
at::Tensor & empty_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_names_out_empty_out(size, names, memory_format, out);
}
at::Tensor & empty_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_names_out_empty_out(size, names, memory_format, out);
}
// Generated redispatch stubs for aten::new_empty and aten::new_empty_strided.
// The shared wrappers are SymInt-typed: plain-int overloads widen
// size/stride with c10::fromIntArrayRefSlow, `_symint` overloads pass
// SymIntArrayRef through. TensorOptions overloads unpack `options` into its
// optional dtype/layout/device/pin_memory components.
at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__new_empty(self, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__new_empty(self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
at::Tensor new_empty_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__new_empty(self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_empty_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__new_empty(self, size, dtype, layout, device, pin_memory);
}
at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_new_empty_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & new_empty_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_empty_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & new_empty_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_new_empty_out(self, size, out);
}
at::Tensor & new_empty_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_empty_out(self, size, out);
}
at::Tensor & new_empty_strided_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
    return wrapper_CompositeExplicitAutograd_out_new_empty_strided_out(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
}
at::Tensor & new_empty_strided_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_empty_strided_out(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
}
at::Tensor & new_empty_strided_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    return wrapper_CompositeExplicitAutograd_out_new_empty_strided_out(self, size, stride, out);
}
at::Tensor & new_empty_strided_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_empty_strided_out(self, size, stride, out);
}
// Generated redispatch stubs for aten::new_full. Same overload matrix as
// new_empty above: {int sizes, symint sizes} x {TensorOptions, exploded
// options} for the functional form, plus out/outf variants.
at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__new_full(self, c10::fromIntArrayRefSlow(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__new_full(self, c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
}
at::Tensor new_full_symint(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__new_full(self, size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_full_symint(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__new_full(self, size, fill_value, dtype, layout, device, pin_memory);
}
at::Tensor & new_full_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value) {
    return wrapper_CompositeExplicitAutograd_out_new_full_out(self, c10::fromIntArrayRefSlow(size), fill_value, out);
}
at::Tensor & new_full_outf(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_full_out(self, c10::fromIntArrayRefSlow(size), fill_value, out);
}
at::Tensor & new_full_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
    return wrapper_CompositeExplicitAutograd_out_new_full_out(self, size, fill_value, out);
}
at::Tensor & new_full_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_full_out(self, size, fill_value, out);
}
// Generated redispatch stubs for aten::new_zeros. Same overload matrix as
// new_empty/new_full: int vs symint sizes, packed vs exploded TensorOptions,
// plus out/outf variants — all forwarding to the SymInt-typed wrappers.
at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__new_zeros(self, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__new_zeros(self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
at::Tensor new_zeros_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__new_zeros(self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_zeros_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__new_zeros(self, size, dtype, layout, device, pin_memory);
}
at::Tensor & new_zeros_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_new_zeros_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & new_zeros_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_zeros_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & new_zeros_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_new_zeros_out(self, size, out);
}
at::Tensor & new_zeros_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_zeros_out(self, size, out);
}
// Generated redispatch stubs for aten::new_ones. Same overload matrix as
// new_zeros: int vs symint sizes, packed vs exploded TensorOptions, plus
// out/outf variants — all forwarding to the SymInt-typed wrappers.
at::Tensor new_ones(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__new_ones(self, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_ones(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__new_ones(self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
at::Tensor new_ones_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__new_ones(self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_ones_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__new_ones(self, size, dtype, layout, device, pin_memory);
}
at::Tensor & new_ones_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_new_ones_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & new_ones_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_ones_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & new_ones_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_new_ones_out(self, size, out);
}
at::Tensor & new_ones_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_new_ones_out(self, size, out);
}
// _empty_affine_quantized.out shims: identical forwarding, differing only in
// where `out` sits in the argument list (*_out: first; *_outf: last).
at::Tensor & _empty_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out__empty_affine_quantized_out(size, scale, zero_point, memory_format, out);
}
at::Tensor & _empty_affine_quantized_outf(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__empty_affine_quantized_out(size, scale, zero_point, memory_format, out);
}
// _empty_per_channel_affine_quantized.out shims: per-channel variant with
// tensor-valued scales/zero_points and a quantization axis; both overloads
// forward to the same out-kernel wrapper.
at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out__empty_per_channel_affine_quantized_out(size, scales, zero_points, axis, memory_format, out);
}
at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__empty_per_channel_affine_quantized_out(size, scales, zero_points, axis, memory_format, out);
}
// resize shims. Note the out variants take/return `const at::Tensor &` for
// `out` (as emitted by the codegen for this schema). Int overloads widen
// sizes via c10::fromIntArrayRefSlow; symint overloads pass them through.
at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__resize(self, c10::fromIntArrayRefSlow(size), memory_format);
}
at::Tensor resize_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__resize(self, size, memory_format);
}
const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out_resize_out(self, c10::fromIntArrayRefSlow(size), memory_format, out);
}
const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_resize_out(self, c10::fromIntArrayRefSlow(size), memory_format, out);
}
const at::Tensor & resize_symint_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out_resize_out(self, size, memory_format, out);
}
const at::Tensor & resize_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_resize_out(self, size, memory_format, out);
}
// _resize_output shims: functional form plus const-ref out variants, all
// forwarding to the corresponding CompositeExplicitAutograd wrappers.
at::Tensor _resize_output(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
return wrapper_CompositeExplicitAutograd___resize_output(self, size, device);
}
const at::Tensor & _resize_output_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::Device device) {
return wrapper_CompositeExplicitAutograd_out__resize_output_out(self, size, device, out);
}
const at::Tensor & _resize_output_outf(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__resize_output_out(self, size, device, out);
}
// empty_quantized.out shims: `qtensor` supplies the quantization parameters
// for the new tensor; both overloads hit the same out-kernel wrapper.
at::Tensor & empty_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out_empty_quantized_out(size, qtensor, memory_format, out);
}
at::Tensor & empty_quantized_outf(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_empty_quantized_out(size, qtensor, memory_format, out);
}
// empty_like shims. The TensorOptions overload reconciles an explicit
// memory_format argument against one possibly embedded in `options` via
// c10::impl::check_tensor_options_and_extract_memory_format (which guards
// against conflicting specifications).
at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
at::Tensor & empty_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out_empty_like_out(self, memory_format, out);
}
at::Tensor & empty_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_empty_like_out(self, memory_format, out);
}
// empty_strided.out shims: int overloads widen BOTH size and stride through
// c10::fromIntArrayRefSlow; symint overloads forward unchanged.
at::Tensor & empty_strided_out(at::Tensor & out, at::IntArrayRef size, at::IntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_out_empty_strided_out(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
}
at::Tensor & empty_strided_outf(at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_empty_strided_out(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
}
at::Tensor & empty_strided_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_out_empty_strided_out(size, stride, out);
}
at::Tensor & empty_strided_symint_outf(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_empty_strided_out(size, stride, out);
}
// expand shims: int and symint entry points for the same expand wrapper.
at::Tensor expand(const at::Tensor & self, at::IntArrayRef size, bool implicit) {
return wrapper_CompositeExplicitAutograd__expand(self, c10::fromIntArrayRefSlow(size), implicit);
}
at::Tensor expand_symint(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
return wrapper_CompositeExplicitAutograd__expand(self, size, implicit);
}
// eye shims: square (n) and rectangular (n, m) overloads, each in
// TensorOptions and unpacked-optionals form; the rectangular variant routes
// to the separately-registered "m" wrapper.
at::Tensor eye(int64_t n, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__eye(n, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor eye(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__eye(n, dtype, layout, device, pin_memory);
}
at::Tensor eye(int64_t n, int64_t m, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_m_eye(n, m, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor eye(int64_t n, int64_t m, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_m_eye(n, m, dtype, layout, device, pin_memory);
}
// fill shims: Scalar-valued and Tensor-valued fill overloads, each routed to
// its own schema-specific wrapper (…_Scalar_… vs …_Tensor_…), plus out/outf
// variants for both.
at::Tensor fill(const at::Tensor & self, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_Scalar_fill(self, value);
}
at::Tensor & fill_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_Scalar_out_fill_out(self, value, out);
}
at::Tensor & fill_outf(const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_fill_out(self, value, out);
}
at::Tensor fill(const at::Tensor & self, const at::Tensor & value) {
return wrapper_CompositeExplicitAutograd_Tensor_fill(self, value);
}
at::Tensor & fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & value) {
return wrapper_CompositeExplicitAutograd_Tensor_out_fill_out(self, value, out);
}
at::Tensor & fill_outf(const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_out_fill_out(self, value, out);
}
// full.names shims: named-tensor variant (optional DimnameList) of full.
// Note: sizes are passed as plain IntArrayRef here (no symint widening),
// matching the named-tensor schema this wrapper is registered for.
at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_names_full(size, fill_value, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_names_full(size, fill_value, names, dtype, layout, device, pin_memory);
}
at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names) {
return wrapper_CompositeExplicitAutograd_names_out_full_out(size, fill_value, names, out);
}
at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_names_out_full_out(size, fill_value, names, out);
}
// full shims (unnamed variant): int overloads widen sizes through
// c10::fromIntArrayRefSlow; *_symint overloads pass SymIntArrayRef through.
// TensorOptions overloads unpack into the wrapper's optional fields.
at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__full(c10::fromIntArrayRefSlow(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__full(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
}
at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__full(size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__full(size, fill_value, dtype, layout, device, pin_memory);
}
// Out variants (`*_out` takes `out` first, `*_outf` takes it last).
at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
return wrapper_CompositeExplicitAutograd_out_full_out(c10::fromIntArrayRefSlow(size), fill_value, out);
}
at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_full_out(c10::fromIntArrayRefSlow(size), fill_value, out);
}
at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
return wrapper_CompositeExplicitAutograd_out_full_out(size, fill_value, out);
}
at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_full_out(size, fill_value, out);
}
// full_like shims. As with empty_like, the TensorOptions overload merges the
// explicit memory_format with any embedded in `options` via
// c10::impl::check_tensor_options_and_extract_memory_format.
at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__full_like(self, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__full_like(self, fill_value, dtype, layout, device, pin_memory, memory_format);
}
at::Tensor & full_like_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out_full_like_out(self, fill_value, memory_format, out);
}
at::Tensor & full_like_outf(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_full_like_out(self, fill_value, memory_format, out);
}
// from_file.out shims: filename is a non-owning c10::string_view; `shared`
// and `size` are optional and forwarded untouched.
at::Tensor & from_file_out(at::Tensor & out, c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size) {
return wrapper_CompositeExplicitAutograd_out_from_file_out(filename, shared, size, out);
}
at::Tensor & from_file_outf(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_from_file_out(filename, shared, size, out);
}
// grid_sampler_2d out shims: forward plus backward (two outputs out0/out1,
// gated by output_mask). interpolation_mode / padding_mode are raw int64
// enum codes as defined by the op schema.
at::Tensor & grid_sampler_2d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
return wrapper_CompositeExplicitAutograd_out_grid_sampler_2d_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
at::Tensor & grid_sampler_2d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_grid_sampler_2d_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
return wrapper_CompositeExplicitAutograd_out_grid_sampler_2d_backward_out(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_grid_sampler_2d_backward_out(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}
// _grid_sampler_2d_cpu_fallback shims: functional form plus out/outf
// variants, all forwarding to the corresponding wrappers in this TU.
at::Tensor _grid_sampler_2d_cpu_fallback(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
return wrapper_CompositeExplicitAutograd___grid_sampler_2d_cpu_fallback(input, grid, interpolation_mode, padding_mode, align_corners);
}
at::Tensor & _grid_sampler_2d_cpu_fallback_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
return wrapper_CompositeExplicitAutograd_out__grid_sampler_2d_cpu_fallback_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
at::Tensor & _grid_sampler_2d_cpu_fallback_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__grid_sampler_2d_cpu_fallback_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
// grid_sampler_3d out shims: 3-D counterpart of the 2-D family above, with
// identical forwarding structure (forward + two-output backward).
at::Tensor & grid_sampler_3d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
return wrapper_CompositeExplicitAutograd_out_grid_sampler_3d_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
at::Tensor & grid_sampler_3d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_grid_sampler_3d_out(input, grid, interpolation_mode, padding_mode, align_corners, out);
}
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
return wrapper_CompositeExplicitAutograd_out_grid_sampler_3d_backward_out(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_grid_sampler_3d_backward_out(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}
// hann_window shims: base (window_length) and periodic overloads, each in
// TensorOptions and unpacked-optionals form, plus out/outf variants. The
// periodic overloads route to the separately-registered "periodic" wrappers.
at::Tensor hann_window(int64_t window_length, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__hann_window(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor hann_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__hann_window(window_length, dtype, layout, device, pin_memory);
}
at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length) {
return wrapper_CompositeExplicitAutograd_out_hann_window_out(window_length, out);
}
at::Tensor & hann_window_outf(int64_t window_length, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_hann_window_out(window_length, out);
}
at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_periodic_hann_window(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor hann_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_periodic_hann_window(window_length, periodic, dtype, layout, device, pin_memory);
}
at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
return wrapper_CompositeExplicitAutograd_periodic_out_hann_window_out(window_length, periodic, out);
}
at::Tensor & hann_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_periodic_out_hann_window_out(window_length, periodic, out);
}
// hamming_window shims, part 1: base (window_length) and periodic overloads.
// Each schema variant has its own wrapper ("…__…" vs "…_periodic_…") and
// matching out/outf forms.
at::Tensor hamming_window(int64_t window_length, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__hamming_window(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor hamming_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__hamming_window(window_length, dtype, layout, device, pin_memory);
}
at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length) {
return wrapper_CompositeExplicitAutograd_out_hamming_window_out(window_length, out);
}
at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_hamming_window_out(window_length, out);
}
at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_periodic_hamming_window(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor hamming_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_periodic_hamming_window(window_length, periodic, dtype, layout, device, pin_memory);
}
at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
return wrapper_CompositeExplicitAutograd_periodic_out_hamming_window_out(window_length, periodic, out);
}
at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_periodic_out_hamming_window_out(window_length, periodic, out);
}
// hamming_window shims, part 2: periodic+alpha and periodic+alpha+beta
// overloads (window coefficients as doubles), each with TensorOptions,
// unpacked, and out/outf forms routed to the matching wrappers.
at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_periodic_alpha_hamming_window(window_length, periodic, alpha, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_periodic_alpha_hamming_window(window_length, periodic, alpha, dtype, layout, device, pin_memory);
}
at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha) {
return wrapper_CompositeExplicitAutograd_periodic_alpha_out_hamming_window_out(window_length, periodic, alpha, out);
}
at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_periodic_alpha_out_hamming_window_out(window_length, periodic, alpha, out);
}
at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_periodic_alpha_beta_hamming_window(window_length, periodic, alpha, beta, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_periodic_alpha_beta_hamming_window(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
}
at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta) {
return wrapper_CompositeExplicitAutograd_periodic_alpha_beta_out_hamming_window_out(window_length, periodic, alpha, beta, out);
}
at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_periodic_alpha_beta_out_hamming_window_out(window_length, periodic, alpha, beta, out);
}
// kaiser_window shims, part 1: base and periodic overloads, each with
// TensorOptions / unpacked-optionals forms plus out/outf variants.
at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__kaiser_window(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor kaiser_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__kaiser_window(window_length, dtype, layout, device, pin_memory);
}
at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length) {
return wrapper_CompositeExplicitAutograd_out_kaiser_window_out(window_length, out);
}
at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_kaiser_window_out(window_length, out);
}
at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_periodic_kaiser_window(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_periodic_kaiser_window(window_length, periodic, dtype, layout, device, pin_memory);
}
at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
return wrapper_CompositeExplicitAutograd_periodic_out_kaiser_window_out(window_length, periodic, out);
}
at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_periodic_out_kaiser_window_out(window_length, periodic, out);
}
// kaiser_window shims, part 2: periodic+beta overloads (beta is the Kaiser
// shape parameter per the op schema), routed to the "beta" wrappers.
at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_beta_kaiser_window(window_length, periodic, beta, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_beta_kaiser_window(window_length, periodic, beta, dtype, layout, device, pin_memory);
}
at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta) {
return wrapper_CompositeExplicitAutograd_beta_out_kaiser_window_out(window_length, periodic, beta, out);
}
at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_beta_out_kaiser_window_out(window_length, periodic, beta, out);
}
// native_group_norm forward shims: three-output op (output, mean, rstd per
// the tuple arity). int64 and SymInt overloads for N/C/HxW share one wrapper;
// out variants return the (out0, out1, out2) reference tuple.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
return wrapper_CompositeExplicitAutograd__native_group_norm(input, weight, bias, N, C, HxW, group, eps);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_symint(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
return wrapper_CompositeExplicitAutograd__native_group_norm(input, weight, bias, N, C, HxW, group, eps);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
return wrapper_CompositeExplicitAutograd_out_native_group_norm_out(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_group_norm_out(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
return wrapper_CompositeExplicitAutograd_out_native_group_norm_out(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_symint_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_group_norm_out(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
}
// native_group_norm_backward out-variant stubs. All four public spellings
// (out/outf, int64_t/SymInt sizes) forward to the same backward wrapper with
// an identical argument order; output_mask selects which of the three
// gradients the wrapper is asked to produce.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_native_group_norm_backward_out(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_group_norm_backward_out(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_native_group_norm_backward_out(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_symint_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_group_norm_backward_out(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}
// index_put family. Functional (index_put), in-place (index_put_), out
// (index_put_out/outf), and the internal _index_put_impl variants (which add
// the `unsafe` flag) all forward verbatim to their respective wrappers.
at::Tensor index_put(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
return wrapper_CompositeExplicitAutograd__index_put(self, indices, values, accumulate);
}
at::Tensor & index_put_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
return wrapper_CompositeExplicitAutograd_out_index_put_out(self, indices, values, accumulate, out);
}
at::Tensor & index_put_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_index_put_out(self, indices, values, accumulate, out);
}
at::Tensor & index_put_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
return wrapper_CompositeExplicitAutograd__index_put_(self, indices, values, accumulate);
}
at::Tensor _index_put_impl(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
return wrapper_CompositeExplicitAutograd___index_put_impl(self, indices, values, accumulate, unsafe);
}
at::Tensor & _index_put_impl_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
return wrapper_CompositeExplicitAutograd_out__index_put_impl_out(self, indices, values, accumulate, unsafe, out);
}
at::Tensor & _index_put_impl_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__index_put_impl_out(self, indices, values, accumulate, unsafe, out);
}
// isnan (out variants), is_same_size, and kthvalue: direct pass-through stubs
// to the corresponding CompositeExplicitAutograd wrappers.
at::Tensor & isnan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_isnan_out(self, out);
}
at::Tensor & isnan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_isnan_out(self, out);
}
bool is_same_size(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd__is_same_size(self, other);
}
::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutograd__kthvalue(self, k, dim, keepdim);
}
// native_layer_norm entry points. The at::IntArrayRef overloads convert the
// concrete sizes with c10::fromIntArrayRefSlow before calling the shared
// wrapper; the *_symint overloads pass their c10::SymIntArrayRef through
// unchanged, so both spellings end up at the same wrapper.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
return wrapper_CompositeExplicitAutograd__native_layer_norm(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
return wrapper_CompositeExplicitAutograd__native_layer_norm(input, normalized_shape, weight, bias, eps);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
return wrapper_CompositeExplicitAutograd_out_native_layer_norm_out(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_outf(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_layer_norm_out(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
return wrapper_CompositeExplicitAutograd_out_native_layer_norm_out(input, normalized_shape, weight, bias, eps, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_symint_outf(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_layer_norm_out(input, normalized_shape, weight, bias, eps, out0, out1, out2);
}
// native_layer_norm_backward out-variant stubs. IntArrayRef overloads go
// through c10::fromIntArrayRefSlow; symint overloads forward directly.
// output_mask selects which of the three gradients to compute.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_native_layer_norm_backward_out(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_layer_norm_backward_out(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_native_layer_norm_backward_out(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_layer_norm_backward_out(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
}
// nan_to_num: functional and in-place stubs; optional replacement values are
// forwarded as-is (nullopt semantics are decided by the wrapper/kernel).
at::Tensor nan_to_num(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
return wrapper_CompositeExplicitAutograd__nan_to_num(self, nan, posinf, neginf);
}
at::Tensor & nan_to_num_(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
return wrapper_CompositeExplicitAutograd__nan_to_num_(self, nan, posinf, neginf);
}
// linear / linear_backward and the mkldnn_linear family: pass-through stubs.
// Each out/outf pair forwards to the identical wrapper call; only the public
// placement of the out-tensors differs.
at::Tensor & linear_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
return wrapper_CompositeExplicitAutograd_out_linear_out(input, weight, bias, out);
}
at::Tensor & linear_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_linear_out(input, weight, bias, out);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_linear_backward_out(self, grad_output, weight, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_linear_backward_out(self, grad_output, weight, output_mask, out0, out1, out2);
}
at::Tensor & mkldnn_linear_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_linear_out(self, weight, bias, out);
}
at::Tensor & mkldnn_linear_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_linear_out(self, weight, bias, out);
}
at::Tensor & mkldnn_linear_backward_input_out(at::Tensor & out, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_input_out(input_size, grad_output, weight, out);
}
at::Tensor & mkldnn_linear_backward_input_outf(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_input_out(input_size, grad_output, weight, out);
}
::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_weights_out(grad_output, input, weight, bias_defined, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_weights_out(grad_output, input, weight, bias_defined, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_out(self, grad_output, weight, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_linear_backward_out(self, grad_output, weight, output_mask, out0, out1, out2);
}
// linspace / logspace factory overloads and the xlogy scalar overloads.
// The at::TensorOptions overloads unpack the bundled options into the four
// separate optionals the wrapper expects (dtype via optTypeMetaToScalarType);
// the explicit-optional overloads forward them unchanged. For xlogy, the
// Scalar_Self wrappers handle a scalar `self` and the Scalar_Other wrappers a
// scalar `other`.
at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__linspace(start, end, steps, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__linspace(start, end, steps, dtype, layout, device, pin_memory);
}
at::Tensor xlogy(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Self_xlogy(self, other);
}
at::Tensor & xlogy_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_OutScalar_Self_xlogy_out(self, other, out);
}
at::Tensor & xlogy_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_OutScalar_Self_xlogy_out(self, other, out);
}
at::Tensor xlogy(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Other_xlogy(self, other);
}
at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_OutScalar_Other_xlogy_out(self, other, out);
}
at::Tensor & xlogy_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_OutScalar_Other_xlogy_out(self, other, out);
}
at::Tensor & xlogy_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Other_xlogy_(self, other);
}
at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__logspace(start, end, steps, base, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__logspace(start, end, steps, base, dtype, layout, device, pin_memory);
}
// log_softmax (int-dim out variants), logcumsumexp, logsumexp, and
// matmul_backward: direct pass-through stubs to their wrappers.
at::Tensor & log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd_int_out_log_softmax_out(self, dim, dtype, out);
}
at::Tensor & log_softmax_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_int_out_log_softmax_out(self, dim, dtype, out);
}
at::Tensor logcumsumexp(const at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutograd__logcumsumexp(self, dim);
}
at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutograd_out_logcumsumexp_out(self, dim, out);
}
at::Tensor & logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_logcumsumexp_out(self, dim, out);
}
at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeExplicitAutograd__logsumexp(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
return wrapper_CompositeExplicitAutograd_out_matmul_backward_out(grad, self, other, mask, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_outf(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_matmul_backward_out(grad, self, other, mask, out0, out1);
}
// _aminmax out variants: the overall (no-dim) overloads go to the `out`
// wrapper, the per-dimension overloads to the `dim_out` wrapper.
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__aminmax_out(self, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__aminmax_out(self, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutograd_dim_out__aminmax_out(self, dim, keepdim, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_dim_out__aminmax_out(self, dim, keepdim, out0, out1);
}
// Max-pooling out-variant stubs for the MPS, MKLDNN, and quantized backends.
// Every out/outf pair forwards the pooling geometry (kernel_size, stride,
// padding, dilation, ceil_mode) unchanged to the same wrapper.
at::Tensor & _mps_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutograd_out__mps_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & _mps_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__mps_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mps_max_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutograd_out_mps_max_pool2d_backward_out(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mps_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mps_max_pool2d_backward_out(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mkldnn_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mkldnn_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mkldnn_max_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool2d_backward_out(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mkldnn_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool2d_backward_out(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mkldnn_max_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool3d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mkldnn_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool3d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mkldnn_max_pool3d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool3d_backward_out(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & mkldnn_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_max_pool3d_backward_out(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & quantized_max_pool1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutograd_out_quantized_max_pool1d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & quantized_max_pool1d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_quantized_max_pool1d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & quantized_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutograd_out_quantized_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
at::Tensor & quantized_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_quantized_max_pool2d_out(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}
// mean, median, and nanmedian: pass-through stubs. The dim overloads of
// median/nanmedian return (values, indices) via the `dim` wrappers.
at::Tensor mean(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd__mean(self, dtype);
}
at::Tensor & median_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_median_out(self, out);
}
at::Tensor & median_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_median_out(self, out);
}
::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutograd_dim_median(self, dim, keepdim);
}
at::Tensor & nanmedian_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_nanmedian_out(self, out);
}
at::Tensor & nanmedian_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_nanmedian_out(self, out);
}
::std::tuple<at::Tensor,at::Tensor> nanmedian(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutograd_dim_nanmedian(self, dim, keepdim);
}
// Convolution stubs: _mps_convolution (out variants), mps_convolution_backward
// (out variants), and the mkldnn_convolution family. For mkldnn_convolution,
// the at::IntArrayRef padding overloads convert via c10::fromIntArrayRefSlow
// before calling the shared wrapper; the *_symint overloads pass the symbolic
// padding through unchanged. stride/dilation stay concrete in all overloads.
at::Tensor & _mps_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out__mps_convolution_out(self, weight, bias, padding, stride, dilation, groups, out);
}
at::Tensor & _mps_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__mps_convolution_out(self, weight, bias, padding, stride, dilation, groups, out);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_mps_convolution_backward_out(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_mps_convolution_backward_out(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
}
at::Tensor mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd__mkldnn_convolution(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups);
}
at::Tensor mkldnn_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd__mkldnn_convolution(self, weight, bias, padding, stride, dilation, groups);
}
at::Tensor & mkldnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_convolution_out(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, out);
}
at::Tensor & mkldnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_convolution_out(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, out);
}
at::Tensor & mkldnn_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_convolution_out(self, weight, bias, padding, stride, dilation, groups, out);
}
at::Tensor & mkldnn_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_convolution_out(self, weight, bias, padding, stride, dilation, groups, out);
}
// Generated dispatch shims for mkldnn_rnn_layer.out: the `out` spelling takes
// the four output tensors first, the `outf` spelling takes them last; both
// forward to the same CompositeExplicitAutograd out-variant kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_rnn_layer_out(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_outf(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_rnn_layer_out(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
}
// Generated dispatch shims for mkldnn_rnn_layer_backward.out (seven outputs).
// out/outf differ only in out-parameter position; both call the same kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_rnn_layer_backward_out(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_outf(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_rnn_layer_backward_out(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
}
// Generated dispatch shims for miopen_batch_norm.out; out/outf differ only in
// out-parameter position and share one CompositeExplicitAutograd kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
return wrapper_CompositeExplicitAutograd_out_miopen_batch_norm_out(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_miopen_batch_norm_out(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
}
// Generated dispatch shims for miopen_batch_norm_backward.out; out/outf differ
// only in out-parameter position and share one kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
return wrapper_CompositeExplicitAutograd_out_miopen_batch_norm_backward_out(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_outf(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_miopen_batch_norm_backward_out(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
}
// Generated dispatch shims for miopen_convolution.out. IntArrayRef overloads
// convert `padding` via c10::fromIntArrayRefSlow; *_symint overloads pass the
// SymIntArrayRef through. out/outf differ only in out-parameter position.
at::Tensor & miopen_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
return wrapper_CompositeExplicitAutograd_out_miopen_convolution_out(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_miopen_convolution_out(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
return wrapper_CompositeExplicitAutograd_out_miopen_convolution_out(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_miopen_convolution_out(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}
// Generated dispatch shims for miopen_convolution_transpose.out. Here BOTH
// `padding` and `output_padding` get the IntArrayRef -> SymInt conversion in
// the non-symint overloads; the *_symint overloads pass them through.
at::Tensor & miopen_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
return wrapper_CompositeExplicitAutograd_out_miopen_convolution_transpose_out(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_miopen_convolution_transpose_out(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_convolution_transpose_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
return wrapper_CompositeExplicitAutograd_out_miopen_convolution_transpose_out(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_convolution_transpose_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_miopen_convolution_transpose_out(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
}
// Generated dispatch shims for miopen_depthwise_convolution.out; same pattern
// as miopen_convolution.out above (SymInt conversion only for `padding`).
at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
return wrapper_CompositeExplicitAutograd_out_miopen_depthwise_convolution_out(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_miopen_depthwise_convolution_out(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_depthwise_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
return wrapper_CompositeExplicitAutograd_out_miopen_depthwise_convolution_out(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}
at::Tensor & miopen_depthwise_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_miopen_depthwise_convolution_out(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}
// Generated dispatch shims for miopen_rnn.out (five outputs); out/outf differ
// only in out-parameter position and share one kernel.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
return wrapper_CompositeExplicitAutograd_out_miopen_rnn_out(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
return wrapper_CompositeExplicitAutograd_out_miopen_rnn_out(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}
// Generated dispatch shims for miopen_rnn_backward.out. Returns void: results
// are written into out0..out2 and the TensorList out3. out/outf differ only in
// out-parameter position.
void miopen_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
return wrapper_CompositeExplicitAutograd_out_miopen_rnn_backward_out(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}
void miopen_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
return wrapper_CompositeExplicitAutograd_out_miopen_rnn_backward_out(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}
// Generated dispatch shims for _sparse_sparse_matmul.out; out/outf differ only
// in out-parameter position.
at::Tensor & _sparse_sparse_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_out__sparse_sparse_matmul_out(self, other, out);
}
at::Tensor & _sparse_sparse_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_sparse_matmul_out(self, other, out);
}
// Generated dispatch shims for mode.values (named out tensors values/indices);
// out/outf differ only in out-parameter position.
::std::tuple<at::Tensor &,at::Tensor &> mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutograd_values_mode_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> mode_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
return wrapper_CompositeExplicitAutograd_values_mode_out(self, dim, keepdim, values, indices);
}
// Generated dispatch shims for the Scalar overloads of mul: functional,
// out/outf, and in-place (mul_) variants, each forwarding to its
// CompositeExplicitAutograd kernel.
at::Tensor mul(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_mul(self, other);
}
at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out_mul_out(self, other, out);
}
at::Tensor & mul_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_mul_out(self, other, out);
}
at::Tensor & mul_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_mul_(self, other);
}
// Generated dispatch shims for mv (matrix-vector product): functional and
// out/outf variants forwarding to the CompositeExplicitAutograd kernels.
at::Tensor mv(const at::Tensor & self, const at::Tensor & vec) {
return wrapper_CompositeExplicitAutograd__mv(self, vec);
}
at::Tensor & mv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec) {
return wrapper_CompositeExplicitAutograd_out_mv_out(self, vec, out);
}
at::Tensor & mv_outf(const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mv_out(self, vec, out);
}
// Generated dispatch shims for mvlgamma: functional and in-place variants.
at::Tensor mvlgamma(const at::Tensor & self, int64_t p) {
return wrapper_CompositeExplicitAutograd__mvlgamma(self, p);
}
at::Tensor & mvlgamma_(at::Tensor & self, int64_t p) {
return wrapper_CompositeExplicitAutograd__mvlgamma_(self, p);
}
// Generated dispatch shim for the functionalized batch-norm variant (returns
// five tensors by value instead of mutating running stats in place).
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
return wrapper_CompositeExplicitAutograd___native_batch_norm_legit_functional(input, weight, bias, running_mean, running_var, training, momentum, eps);
}
// Generated dispatch shims for batch_norm_stats.out; out/outf differ only in
// out-parameter position.
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double eps) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_stats_out(input, eps, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_outf(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_stats_out(input, eps, out0, out1);
}
// Generated dispatch shims for batch_norm_gather_stats.out; out/outf differ
// only in out-parameter position.
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_gather_stats_out(input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_gather_stats_out(input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
}
// Generated dispatch shims for batch_norm_gather_stats_with_counts.out
// (per-replica `counts` tensor instead of the scalar `count` above).
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_gather_stats_with_counts_out(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_gather_stats_with_counts_out(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
}
// Generated dispatch shims for native_batch_norm_backward.out; out/outf differ
// only in out-parameter position.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_out_native_batch_norm_backward_out(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_native_batch_norm_backward_out(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
}
// Generated dispatch shims for batch_norm_backward_reduce.out (four outputs);
// out/outf differ only in out-parameter position.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_backward_reduce_out(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_backward_reduce_out(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
}
// Generated dispatch shims for batch_norm_backward_elemt.out; out/outf differ
// only in out-parameter position.
at::Tensor & batch_norm_backward_elemt_out(at::Tensor & out, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_backward_elemt_out(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
}
at::Tensor & batch_norm_backward_elemt_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_backward_elemt_out(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
}
// Generated dispatch shims for batch_norm_update_stats.out; out/outf differ
// only in out-parameter position.
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_update_stats_out(input, running_mean, running_var, momentum, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_outf(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_batch_norm_update_stats_out(input, running_mean, running_var, momentum, out0, out1);
}
// Generated dispatch shims for _nnpack_spatial_convolution: functional and
// out variants. IntArrayRef overloads convert `padding` with
// c10::fromIntArrayRefSlow; *_symint overloads take SymIntArrayRef directly.
at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CompositeExplicitAutograd___nnpack_spatial_convolution(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride);
}
at::Tensor _nnpack_spatial_convolution_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CompositeExplicitAutograd___nnpack_spatial_convolution(input, weight, bias, padding, stride);
}
at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_out__nnpack_spatial_convolution_out(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride, out);
}
at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nnpack_spatial_convolution_out(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride, out);
}
at::Tensor & _nnpack_spatial_convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_out__nnpack_spatial_convolution_out(input, weight, bias, padding, stride, out);
}
at::Tensor & _nnpack_spatial_convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nnpack_spatial_convolution_out(input, weight, bias, padding, stride, out);
}
// Generated dispatch shims for ones.names. The TensorOptions overload unpacks
// the bundled options into the dtype/layout/device/pin_memory optionals that
// the kernel expects (via optTypeMetaToScalarType for dtype).
at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_names_ones(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_names_ones(size, names, dtype, layout, device, pin_memory);
}
at::Tensor & ones_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
return wrapper_CompositeExplicitAutograd_names_out_ones_out(size, names, out);
}
at::Tensor & ones_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_names_out_ones_out(size, names, out);
}
// Generated dispatch shims for the plain ones factory. IntArrayRef overloads
// convert `size` via c10::fromIntArrayRefSlow; ones_symint passes the caller's
// SymIntArrayRef straight through. TensorOptions overloads unpack the bundled
// options into the kernel's individual optionals.
at::Tensor ones(at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__ones(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__ones(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__ones(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor ones_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__ones(size, dtype, layout, device, pin_memory);
}
9796 | at::Tensor & ones_out(at::Tensor & out, at::IntArrayRef size) { |
9797 | return wrapper_CompositeExplicitAutograd_out_ones_out(c10::fromIntArrayRefSlow(size), out); |
9798 | } |
9799 | at::Tensor & ones_outf(at::IntArrayRef size, at::Tensor & out) { |
9800 | return wrapper_CompositeExplicitAutograd_out_ones_out(c10::fromIntArrayRefSlow(size), out); |
9801 | } |
// Generated shim (ones.out): SymInt-size out-first overload.
at::Tensor & ones_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_ones_out(size, out);
}
// Generated shim (ones.out): SymInt-size schema-order overload (out last).
at::Tensor & ones_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_ones_out(size, out);
}
// Generated shim (ones_like): TensorOptions overload; memory_format cross-checked against options before dispatch.
at::Tensor ones_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd__ones_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
// Generated shim (ones_like): unpacked-arguments overload; forwards directly.
at::Tensor ones_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd__ones_like(self, dtype, layout, device, pin_memory, memory_format);
}
// Generated shim (ones_like.out): out-first overload.
at::Tensor & ones_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_out_ones_like_out(self, memory_format, out);
}
// Generated shim (ones_like.out): schema-order overload (out last); same wrapper as ones_like_out.
at::Tensor & ones_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_ones_like_out(self, memory_format, out);
}
// Generated shim (_euclidean_dist): functional form; forwards to the codegen'd wrapper.
at::Tensor _euclidean_dist(const at::Tensor & x1, const at::Tensor & x2) {
    return wrapper_CompositeExplicitAutograd___euclidean_dist(x1, x2);
}
// Generated shim (_euclidean_dist.out): out-first overload.
at::Tensor & _euclidean_dist_out(at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2) {
    return wrapper_CompositeExplicitAutograd_out__euclidean_dist_out(x1, x2, out);
}
// Generated shim (_euclidean_dist.out): schema-order overload (out last).
at::Tensor & _euclidean_dist_outf(const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__euclidean_dist_out(x1, x2, out);
}
// Generated shim (_cdist_forward.out): out-first overload.
at::Tensor & _cdist_forward_out(at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
    return wrapper_CompositeExplicitAutograd_out__cdist_forward_out(x1, x2, p, compute_mode, out);
}
// Generated shim (_cdist_forward.out): schema-order overload (out last).
at::Tensor & _cdist_forward_outf(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__cdist_forward_out(x1, x2, p, compute_mode, out);
}
// Generated shim (_cdist_backward.out): out-first overload.
at::Tensor & _cdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
    return wrapper_CompositeExplicitAutograd_out__cdist_backward_out(grad, x1, x2, p, cdist, out);
}
// Generated shim (_cdist_backward.out): schema-order overload (out last).
at::Tensor & _cdist_backward_outf(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__cdist_backward_out(grad, x1, x2, p, cdist, out);
}
// Generated shim (_pdist_forward.out): out-first overload.
at::Tensor & _pdist_forward_out(at::Tensor & out, const at::Tensor & self, double p) {
    return wrapper_CompositeExplicitAutograd_out__pdist_forward_out(self, p, out);
}
// Generated shim (_pdist_forward.out): schema-order overload (out last).
at::Tensor & _pdist_forward_outf(const at::Tensor & self, double p, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__pdist_forward_out(self, p, out);
}
// Generated shim (_pdist_backward.out): out-first overload.
at::Tensor & _pdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
    return wrapper_CompositeExplicitAutograd_out__pdist_backward_out(grad, self, p, pdist, out);
}
// Generated shim (_pdist_backward.out): schema-order overload (out last).
at::Tensor & _pdist_backward_outf(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__pdist_backward_out(grad, self, p, pdist, out);
}
// Generated shim (permute): functional form; forwards to the codegen'd wrapper.
at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims) {
    return wrapper_CompositeExplicitAutograd__permute(self, dims);
}
// Generated shim (pixel_shuffle.out): out-first overload.
at::Tensor & pixel_shuffle_out(at::Tensor & out, const at::Tensor & self, int64_t upscale_factor) {
    return wrapper_CompositeExplicitAutograd_out_pixel_shuffle_out(self, upscale_factor, out);
}
// Generated shim (pixel_shuffle.out): schema-order overload (out last).
at::Tensor & pixel_shuffle_outf(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_pixel_shuffle_out(self, upscale_factor, out);
}
// Generated shim (pixel_unshuffle.out): out-first overload.
at::Tensor & pixel_unshuffle_out(at::Tensor & out, const at::Tensor & self, int64_t downscale_factor) {
    return wrapper_CompositeExplicitAutograd_out_pixel_unshuffle_out(self, downscale_factor, out);
}
// Generated shim (pixel_unshuffle.out): schema-order overload (out last).
at::Tensor & pixel_unshuffle_outf(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_pixel_unshuffle_out(self, downscale_factor, out);
}
// Generated shim (channel_shuffle.out): out-first overload.
at::Tensor & channel_shuffle_out(at::Tensor & out, const at::Tensor & self, int64_t groups) {
    return wrapper_CompositeExplicitAutograd_out_channel_shuffle_out(self, groups, out);
}
// Generated shim (channel_shuffle.out): schema-order overload (out last).
at::Tensor & channel_shuffle_outf(const at::Tensor & self, int64_t groups, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_channel_shuffle_out(self, groups, out);
}
// Generated shim (is_pinned): forwards to the codegen'd wrapper.
bool is_pinned(const at::Tensor & self, c10::optional<at::Device> device) {
    return wrapper_CompositeExplicitAutograd__is_pinned(self, device);
}
// Generated shim (_pin_memory.out): out-first overload.
at::Tensor & _pin_memory_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Device> device) {
    return wrapper_CompositeExplicitAutograd_out__pin_memory_out(self, device, out);
}
// Generated shim (_pin_memory.out): schema-order overload (out last).
at::Tensor & _pin_memory_outf(const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__pin_memory_out(self, device, out);
}
// Generated shim (rad2deg): functional form.
at::Tensor rad2deg(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__rad2deg(self);
}
// Generated shim (rad2deg.out): out-first overload.
at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd_out_rad2deg_out(self, out);
}
// Generated shim (rad2deg.out): schema-order overload (out last).
at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_rad2deg_out(self, out);
}
// Generated shim (rad2deg_): in-place form.
at::Tensor & rad2deg_(at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__rad2deg_(self);
}
// Generated shim (deg2rad): functional form.
at::Tensor deg2rad(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__deg2rad(self);
}
// Generated shim (deg2rad.out): out-first overload.
at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd_out_deg2rad_out(self, out);
}
// Generated shim (deg2rad.out): schema-order overload (out last).
at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_deg2rad_out(self, out);
}
// Generated shim (deg2rad_): in-place form.
at::Tensor & deg2rad_(at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__deg2rad_(self);
}
// Generated shim (scalar_tensor): TensorOptions overload — splits options into optional dtype/layout/device/pin_memory.
at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__scalar_tensor(s, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (scalar_tensor): unpacked-arguments overload.
at::Tensor scalar_tensor(const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__scalar_tensor(s, dtype, layout, device, pin_memory);
}
// Generated shim (scalar_tensor.out): out-first overload.
at::Tensor & scalar_tensor_out(at::Tensor & out, const at::Scalar & s) {
    return wrapper_CompositeExplicitAutograd_out_scalar_tensor_out(s, out);
}
// Generated shim (scalar_tensor.out): schema-order overload (out last).
at::Tensor & scalar_tensor_outf(const at::Scalar & s, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_scalar_tensor_out(s, out);
}
// Generated shim (rand.names): TensorOptions overload; sizes converted via c10::fromIntArrayRefSlow.
at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_names_rand(c10::fromIntArrayRefSlow(size), names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (rand.names): unpacked-arguments overload; sizes converted via c10::fromIntArrayRefSlow.
at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_names_rand(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
}
// Generated shim (rand.names): SymInt-size TensorOptions overload.
at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_names_rand(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (rand.names): SymInt-size unpacked-arguments overload.
at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_names_rand(size, names, dtype, layout, device, pin_memory);
}
// Generated shim (rand.names_out): out-first overload.
at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
    return wrapper_CompositeExplicitAutograd_names_out_rand_out(c10::fromIntArrayRefSlow(size), names, out);
}
// Generated shim (rand.names_out): schema-order overload (out last).
at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_names_out_rand_out(c10::fromIntArrayRefSlow(size), names, out);
}
// Generated shim (rand.names_out): SymInt-size out-first overload.
at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names) {
    return wrapper_CompositeExplicitAutograd_names_out_rand_out(size, names, out);
}
// Generated shim (rand.names_out): SymInt-size schema-order overload (out last).
at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_names_out_rand_out(size, names, out);
}
// Generated shim (rand.generator_with_names): TensorOptions overload.
at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_generator_with_names_rand(c10::fromIntArrayRefSlow(size), generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (rand.generator_with_names): unpacked-arguments overload.
at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_generator_with_names_rand(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
}
// Generated shim (rand.generator_with_names): SymInt-size TensorOptions overload.
at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_generator_with_names_rand(size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (rand.generator_with_names): SymInt-size unpacked-arguments overload.
at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_generator_with_names_rand(size, generator, names, dtype, layout, device, pin_memory);
}
// Generated shim (rand.generator_with_names_out): out-first overload.
at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
    return wrapper_CompositeExplicitAutograd_generator_with_names_out_rand_out(c10::fromIntArrayRefSlow(size), generator, names, out);
}
// Generated shim (rand.generator_with_names_out): schema-order overload (out last).
at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_generator_with_names_out_rand_out(c10::fromIntArrayRefSlow(size), generator, names, out);
}
// Generated shim (rand.generator_with_names_out): SymInt-size out-first overload.
at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
    return wrapper_CompositeExplicitAutograd_generator_with_names_out_rand_out(size, generator, names, out);
}
// Generated shim (rand.generator_with_names_out): SymInt-size schema-order overload (out last).
at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_generator_with_names_out_rand_out(size, generator, names, out);
}
// Generated shim (rand): TensorOptions overload; sizes converted via c10::fromIntArrayRefSlow.
at::Tensor rand(at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__rand(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (rand): unpacked-arguments overload.
at::Tensor rand(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__rand(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
// Generated shim (rand): SymInt-size TensorOptions overload.
at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__rand(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (rand): SymInt-size unpacked-arguments overload.
at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__rand(size, dtype, layout, device, pin_memory);
}
// Generated shim (rand.out): out-first overload.
at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_rand_out(c10::fromIntArrayRefSlow(size), out);
}
// Generated shim (rand.out): schema-order overload (out last).
at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_rand_out(c10::fromIntArrayRefSlow(size), out);
}
// Generated shim (rand.out): SymInt-size out-first overload.
at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_rand_out(size, out);
}
// Generated shim (rand.out): SymInt-size schema-order overload (out last).
at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_rand_out(size, out);
}
// Generated shim (rand.generator): TensorOptions overload.
at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_generator_rand(c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (rand.generator): unpacked-arguments overload.
at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_generator_rand(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
}
// Generated shim (rand.generator): SymInt-size TensorOptions overload.
at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_generator_rand(size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (rand.generator): SymInt-size unpacked-arguments overload.
at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_generator_rand(size, generator, dtype, layout, device, pin_memory);
}
// Generated shim (rand_like): TensorOptions overload; memory_format cross-checked against options before dispatch.
at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd__rand_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
// Generated shim (rand_like): unpacked-arguments overload.
at::Tensor rand_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd__rand_like(self, dtype, layout, device, pin_memory, memory_format);
}
// Generated shim (rand_like.out): out-first overload.
at::Tensor & rand_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_out_rand_like_out(self, memory_format, out);
}
// Generated shim (rand_like.out): schema-order overload (out last).
at::Tensor & rand_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_rand_like_out(self, memory_format, out);
}
// Generated shim (randint): TensorOptions overload; sizes converted via c10::fromIntArrayRefSlow.
at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__randint(high, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randint): unpacked-arguments overload.
at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__randint(high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
// Generated shim (randint): SymInt-size TensorOptions overload.
at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__randint(high, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randint): SymInt-size unpacked-arguments overload.
at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__randint(high, size, dtype, layout, device, pin_memory);
}
// Generated shim (randint.out): out-first overload.
at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_randint_out(high, c10::fromIntArrayRefSlow(size), out);
}
// Generated shim (randint.out): schema-order overload (out last).
at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_randint_out(high, c10::fromIntArrayRefSlow(size), out);
}
// Generated shim (randint.out): SymInt-size out-first overload.
at::Tensor & randint_symint_out(at::Tensor & out, int64_t high, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_out_randint_out(high, size, out);
}
// Generated shim (randint.out): SymInt-size schema-order overload (out last).
at::Tensor & randint_symint_outf(int64_t high, c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_randint_out(high, size, out);
}
// Generated shim (randint.generator): TensorOptions overload.
at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_generator_randint(high, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randint.generator): unpacked-arguments overload.
at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_generator_randint(high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
}
// Generated shim (randint.generator): SymInt-size TensorOptions overload.
at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_generator_randint(high, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randint.generator): SymInt-size unpacked-arguments overload.
at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_generator_randint(high, size, generator, dtype, layout, device, pin_memory);
}
// Generated shim (randint.generator_out): out-first overload.
at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd_generator_out_randint_out(high, c10::fromIntArrayRefSlow(size), generator, out);
}
// Generated shim (randint.generator_out): schema-order overload (out last).
at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_generator_out_randint_out(high, c10::fromIntArrayRefSlow(size), generator, out);
}
// Generated shim (randint.generator_out): SymInt-size out-first overload.
at::Tensor & randint_symint_out(at::Tensor & out, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd_generator_out_randint_out(high, size, generator, out);
}
// Generated shim (randint.generator_out): SymInt-size schema-order overload (out last).
at::Tensor & randint_symint_outf(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_generator_out_randint_out(high, size, generator, out);
}
// Generated shim (randint.low): TensorOptions overload.
at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_low_randint(low, high, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randint.low): unpacked-arguments overload.
at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_low_randint(low, high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
// Generated shim (randint.low): SymInt-size TensorOptions overload.
at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_low_randint(low, high, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randint.low): SymInt-size unpacked-arguments overload.
at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_low_randint(low, high, size, dtype, layout, device, pin_memory);
}
// Generated shim (randint.low_out): out-first overload.
at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_low_out_randint_out(low, high, c10::fromIntArrayRefSlow(size), out);
}
// Generated shim (randint.low_out): schema-order overload (out last).
at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_low_out_randint_out(low, high, c10::fromIntArrayRefSlow(size), out);
}
// Generated shim (randint.low_out): SymInt-size out-first overload.
at::Tensor & randint_symint_out(at::Tensor & out, int64_t low, int64_t high, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutograd_low_out_randint_out(low, high, size, out);
}
// Generated shim (randint.low_out): SymInt-size schema-order overload (out last).
at::Tensor & randint_symint_outf(int64_t low, int64_t high, c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_low_out_randint_out(low, high, size, out);
}
// Generated shim (randint.low_generator): TensorOptions overload.
at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_low_generator_randint(low, high, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randint.low_generator): unpacked-arguments overload.
at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_low_generator_randint(low, high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
}
// Generated shim (randint.low_generator): SymInt-size TensorOptions overload.
at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_low_generator_randint(low, high, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randint.low_generator): SymInt-size unpacked-arguments overload.
at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_low_generator_randint(low, high, size, generator, dtype, layout, device, pin_memory);
}
// Generated shim (randint.low_generator_out): out-first overload.
at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd_low_generator_out_randint_out(low, high, c10::fromIntArrayRefSlow(size), generator, out);
}
// Generated shim (randint.low_generator_out): schema-order overload (out last).
at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_low_generator_out_randint_out(low, high, c10::fromIntArrayRefSlow(size), generator, out);
}
// Generated shim (randint.low_generator_out): SymInt-size out-first overload.
at::Tensor & randint_symint_out(at::Tensor & out, int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeExplicitAutograd_low_generator_out_randint_out(low, high, size, generator, out);
}
// Generated shim (randint.low_generator_out): SymInt-size schema-order overload (out last).
at::Tensor & randint_symint_outf(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_low_generator_out_randint_out(low, high, size, generator, out);
}
// Generated shim (randint_like): TensorOptions overload; memory_format cross-checked against options.
at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd__randint_like(self, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
// Generated shim (randint_like): unpacked-arguments overload.
at::Tensor randint_like(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd__randint_like(self, high, dtype, layout, device, pin_memory, memory_format);
}
// Generated shim (randint_like.out): out-first overload.
at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_out_randint_like_out(self, high, memory_format, out);
}
// Generated shim (randint_like.out): schema-order overload (out last).
at::Tensor & randint_like_outf(const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_randint_like_out(self, high, memory_format, out);
}
// Generated shim (randint_like.low_dtype): TensorOptions overload; memory_format cross-checked against options.
at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_low_dtype_randint_like(self, low, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
// Generated shim (randint_like.low_dtype): unpacked-arguments overload.
at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_low_dtype_randint_like(self, low, high, dtype, layout, device, pin_memory, memory_format);
}
// Generated shim (randint_like.low_dtype_out): out-first overload.
at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeExplicitAutograd_low_dtype_out_randint_like_out(self, low, high, memory_format, out);
}
// Generated shim (randint_like.low_dtype_out): schema-order overload (out last).
at::Tensor & randint_like_outf(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_low_dtype_out_randint_like_out(self, low, high, memory_format, out);
}
// Generated shim (randn): TensorOptions overload; sizes converted via c10::fromIntArrayRefSlow.
at::Tensor randn(at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__randn(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randn): unpacked-arguments overload.
at::Tensor randn(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__randn(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
// Generated shim (randn): SymInt-size TensorOptions overload.
at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__randn(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// Generated shim (randn): SymInt-size unpacked-arguments overload.
at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__randn(size, dtype, layout, device, pin_memory);
}
10147 | at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) { |
10148 | return wrapper_CompositeExplicitAutograd_generator_randn(c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10149 | } |
10150 | at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10151 | return wrapper_CompositeExplicitAutograd_generator_randn(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); |
10152 | } |
10153 | at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) { |
10154 | return wrapper_CompositeExplicitAutograd_generator_randn(size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10155 | } |
10156 | at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10157 | return wrapper_CompositeExplicitAutograd_generator_randn(size, generator, dtype, layout, device, pin_memory); |
10158 | } |
10159 | at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options) { |
10160 | return wrapper_CompositeExplicitAutograd_names_randn(c10::fromIntArrayRefSlow(size), names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10161 | } |
10162 | at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10163 | return wrapper_CompositeExplicitAutograd_names_randn(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); |
10164 | } |
10165 | at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options) { |
10166 | return wrapper_CompositeExplicitAutograd_names_randn(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10167 | } |
10168 | at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10169 | return wrapper_CompositeExplicitAutograd_names_randn(size, names, dtype, layout, device, pin_memory); |
10170 | } |
10171 | at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) { |
10172 | return wrapper_CompositeExplicitAutograd_names_out_randn_out(c10::fromIntArrayRefSlow(size), names, out); |
10173 | } |
10174 | at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
10175 | return wrapper_CompositeExplicitAutograd_names_out_randn_out(c10::fromIntArrayRefSlow(size), names, out); |
10176 | } |
10177 | at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names) { |
10178 | return wrapper_CompositeExplicitAutograd_names_out_randn_out(size, names, out); |
10179 | } |
10180 | at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
10181 | return wrapper_CompositeExplicitAutograd_names_out_randn_out(size, names, out); |
10182 | } |
10183 | at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options) { |
10184 | return wrapper_CompositeExplicitAutograd_generator_with_names_randn(c10::fromIntArrayRefSlow(size), generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10185 | } |
10186 | at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10187 | return wrapper_CompositeExplicitAutograd_generator_with_names_randn(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); |
10188 | } |
10189 | at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options) { |
10190 | return wrapper_CompositeExplicitAutograd_generator_with_names_randn(size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10191 | } |
10192 | at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10193 | return wrapper_CompositeExplicitAutograd_generator_with_names_randn(size, generator, names, dtype, layout, device, pin_memory); |
10194 | } |
10195 | at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) { |
10196 | return wrapper_CompositeExplicitAutograd_generator_with_names_out_randn_out(c10::fromIntArrayRefSlow(size), generator, names, out); |
10197 | } |
10198 | at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) { |
10199 | return wrapper_CompositeExplicitAutograd_generator_with_names_out_randn_out(c10::fromIntArrayRefSlow(size), generator, names, out); |
10200 | } |
10201 | at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) { |
10202 | return wrapper_CompositeExplicitAutograd_generator_with_names_out_randn_out(size, generator, names, out); |
10203 | } |
10204 | at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) { |
10205 | return wrapper_CompositeExplicitAutograd_generator_with_names_out_randn_out(size, generator, names, out); |
10206 | } |
10207 | at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
10208 | return wrapper_CompositeExplicitAutograd__randn_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
10209 | } |
10210 | at::Tensor randn_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
10211 | return wrapper_CompositeExplicitAutograd__randn_like(self, dtype, layout, device, pin_memory, memory_format); |
10212 | } |
10213 | at::Tensor & randn_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) { |
10214 | return wrapper_CompositeExplicitAutograd_out_randn_like_out(self, memory_format, out); |
10215 | } |
10216 | at::Tensor & randn_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
10217 | return wrapper_CompositeExplicitAutograd_out_randn_like_out(self, memory_format, out); |
10218 | } |
// Generated shims for aten::randperm (plain and .generator overloads).
// TensorOptions overloads scatter `options` into dtype/layout/device/pin_memory.
at::Tensor randperm(int64_t n, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__randperm(n, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor randperm(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__randperm(n, dtype, layout, device, pin_memory);
}
// `_out` takes `out` first, `_outf` takes it last; same wrapper underneath.
at::Tensor & randperm_out(at::Tensor & out, int64_t n) {
    return wrapper_CompositeExplicitAutograd_out_randperm_out(n, out);
}
at::Tensor & randperm_outf(int64_t n, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_randperm_out(n, out);
}
// randperm.generator — explicit RNG variant.
at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_generator_randperm(n, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_generator_randperm(n, generator, dtype, layout, device, pin_memory);
}
// Generated shims for aten::range: the .step overload (explicit step) and the
// two-argument overload (step defaulted inside the kernel; not visible here).
at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd_step_range(start, end, step, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd_step_range(start, end, step, dtype, layout, device, pin_memory);
}
at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options) {
    return wrapper_CompositeExplicitAutograd__range(start, end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor range(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeExplicitAutograd__range(start, end, dtype, layout, device, pin_memory);
}
// Out variants (`out` first vs. last) share one out-kernel wrapper.
at::Tensor & range_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end) {
    return wrapper_CompositeExplicitAutograd_out__range_out(start, end, out);
}
at::Tensor & range_outf(const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__range_out(start, end, out);
}
// Generated shims for aten::repeat. Plain IntArrayRef overloads widen to
// SymInt via c10::fromIntArrayRefSlow; `_symint` overloads pass through.
at::Tensor repeat(const at::Tensor & self, at::IntArrayRef repeats) {
    return wrapper_CompositeExplicitAutograd__repeat(self, c10::fromIntArrayRefSlow(repeats));
}
at::Tensor repeat_symint(const at::Tensor & self, c10::SymIntArrayRef repeats) {
    return wrapper_CompositeExplicitAutograd__repeat(self, repeats);
}
at::Tensor & repeat_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef repeats) {
    return wrapper_CompositeExplicitAutograd_out_repeat_out(self, c10::fromIntArrayRefSlow(repeats), out);
}
at::Tensor & repeat_outf(const at::Tensor & self, at::IntArrayRef repeats, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_repeat_out(self, c10::fromIntArrayRefSlow(repeats), out);
}
at::Tensor & repeat_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef repeats) {
    return wrapper_CompositeExplicitAutograd_out_repeat_out(self, repeats, out);
}
at::Tensor & repeat_symint_outf(const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_repeat_out(self, repeats, out);
}
// aten::repeat_interleave.Tensor out variants.
at::Tensor & repeat_interleave_out(at::Tensor & out, const at::Tensor & repeats, c10::optional<int64_t> output_size) {
    return wrapper_CompositeExplicitAutograd_Tensor_out_repeat_interleave_out(repeats, output_size, out);
}
at::Tensor & repeat_interleave_outf(const at::Tensor & repeats, c10::optional<int64_t> output_size, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_Tensor_out_repeat_interleave_out(repeats, output_size, out);
}
// aten::_reshape_copy — int and symint size variants.
at::Tensor _reshape_copy(const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutograd___reshape_copy(self, c10::fromIntArrayRefSlow(size));
}
at::Tensor _reshape_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutograd___reshape_copy(self, size);
}
// aten::_mkldnn_reshape out variants (shape stays IntArrayRef; no symint form).
at::Tensor & _mkldnn_reshape_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef shape) {
    return wrapper_CompositeExplicitAutograd_out__mkldnn_reshape_out(self, shape, out);
}
at::Tensor & _mkldnn_reshape_outf(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__mkldnn_reshape_out(self, shape, out);
}
// Generated shims for aten::relu.out and aten::select.int.
at::Tensor & relu_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd_out_relu_out(self, out);
}
at::Tensor & relu_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_relu_out(self, out);
}
// select: the int64_t overload converts implicitly to SymInt at the call; the
// `_symint` overload passes the SymInt index through unchanged.
at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) {
    return wrapper_CompositeExplicitAutograd_int_select(self, dim, index);
}
at::Tensor select_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
    return wrapper_CompositeExplicitAutograd_int_select(self, dim, index);
}
// Generated shims for aten::select_backward.out. Int overloads widen
// input_sizes via c10::fromIntArrayRefSlow; symint overloads pass through.
at::Tensor & select_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
    return wrapper_CompositeExplicitAutograd_out_select_backward_out(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
}
at::Tensor & select_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_select_backward_out(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
}
at::Tensor & select_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
    return wrapper_CompositeExplicitAutograd_out_select_backward_out(grad_output, input_sizes, dim, index, out);
}
at::Tensor & select_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_select_backward_out(grad_output, input_sizes, dim, index, out);
}
// Generated shims for aten::celu (functional, out, and in-place `celu_`)
// and aten::detach / detach_.
at::Tensor celu(const at::Tensor & self, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd__celu(self, alpha);
}
at::Tensor & celu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd_out_celu_out(self, alpha, out);
}
at::Tensor & celu_outf(const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_celu_out(self, alpha, out);
}
// In-place variant: mutates and returns `self`.
at::Tensor & celu_(at::Tensor & self, const at::Scalar & alpha) {
    return wrapper_CompositeExplicitAutograd__celu_(self, alpha);
}
at::Tensor detach(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__detach(self);
}
at::Tensor & detach_(at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__detach_(self);
}
// Generated shims for aten::slice.Tensor and aten::slice_backward.
// The int64_t overloads rewrap optional<int64_t> bounds as optional<SymInt>
// element-wise (optional<int64_t> does not convert to optional<SymInt>
// implicitly, hence the explicit has_value()/make_optional dance).
at::Tensor slice(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) {
    return wrapper_CompositeExplicitAutograd_Tensor_slice(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
}
at::Tensor slice_symint(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
    return wrapper_CompositeExplicitAutograd_Tensor_slice(self, dim, start, end, step);
}
at::Tensor slice_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
    return wrapper_CompositeExplicitAutograd__slice_backward(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step);
}
at::Tensor slice_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
    return wrapper_CompositeExplicitAutograd__slice_backward(grad_output, input_sizes, dim, start, end, step);
}
// slice_backward.out variants (`out` first vs. last; same wrapper).
at::Tensor & slice_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
    return wrapper_CompositeExplicitAutograd_out_slice_backward_out(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step, out);
}
at::Tensor & slice_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_slice_backward_out(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step, out);
}
at::Tensor & slice_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
    return wrapper_CompositeExplicitAutograd_out_slice_backward_out(grad_output, input_sizes, dim, start, end, step, out);
}
at::Tensor & slice_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_slice_backward_out(grad_output, input_sizes, dim, start, end, step, out);
}
// Generated shims for the scatter-view ops: slice_scatter, select_scatter,
// diagonal_scatter. int64_t overloads rewrap optional bounds as
// optional<SymInt> element-wise before forwarding.
at::Tensor slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) {
    return wrapper_CompositeExplicitAutograd__slice_scatter(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
}
at::Tensor slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
    return wrapper_CompositeExplicitAutograd__slice_scatter(self, src, dim, start, end, step);
}
at::Tensor & slice_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) {
    return wrapper_CompositeExplicitAutograd_out_slice_scatter_out(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
}
at::Tensor & slice_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_slice_scatter_out(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
}
at::Tensor & slice_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
    return wrapper_CompositeExplicitAutograd_out_slice_scatter_out(self, src, dim, start, end, step, out);
}
at::Tensor & slice_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_slice_scatter_out(self, src, dim, start, end, step, out);
}
// select_scatter: int index converts implicitly to SymInt at the call site.
at::Tensor select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
    return wrapper_CompositeExplicitAutograd__select_scatter(self, src, dim, index);
}
at::Tensor select_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
    return wrapper_CompositeExplicitAutograd__select_scatter(self, src, dim, index);
}
at::Tensor & select_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
    return wrapper_CompositeExplicitAutograd_out_select_scatter_out(self, src, dim, index, out);
}
at::Tensor & select_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_select_scatter_out(self, src, dim, index, out);
}
at::Tensor & select_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
    return wrapper_CompositeExplicitAutograd_out_select_scatter_out(self, src, dim, index, out);
}
at::Tensor & select_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_select_scatter_out(self, src, dim, index, out);
}
// diagonal_scatter has no symint form; all args are plain ints.
at::Tensor diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutograd__diagonal_scatter(self, src, offset, dim1, dim2);
}
at::Tensor & diagonal_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutograd_out_diagonal_scatter_out(self, src, offset, dim1, dim2, out);
}
at::Tensor & diagonal_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_diagonal_scatter_out(self, src, offset, dim1, dim2, out);
}
// Generated shims for aten::as_strided_scatter. The int overloads widen size
// and stride via c10::fromIntArrayRefSlow and rewrap the optional
// storage_offset as optional<SymInt>; symint overloads pass through.
at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) {
    return wrapper_CompositeExplicitAutograd__as_strided_scatter(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
}
at::Tensor as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
    return wrapper_CompositeExplicitAutograd__as_strided_scatter(self, src, size, stride, storage_offset);
}
at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) {
    return wrapper_CompositeExplicitAutograd_out_as_strided_scatter_out(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
}
at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_as_strided_scatter_out(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
}
at::Tensor & as_strided_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
    return wrapper_CompositeExplicitAutograd_out_as_strided_scatter_out(self, src, size, stride, storage_offset, out);
}
at::Tensor & as_strided_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_as_strided_scatter_out(self, src, size, stride, storage_offset, out);
}
// Generated shims for aten::softmax.int_out (`out` first vs. last).
at::Tensor & softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeExplicitAutograd_int_out_softmax_out(self, dim, dtype, out);
}
at::Tensor & softmax_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_int_out_softmax_out(self, dim, dtype, out);
}
// Generated shims for the split family: unsafe_split, split,
// unsafe_split_with_sizes, split_with_sizes. `unsafe_*` out variants return
// void and write into the caller-provided TensorList. int64_t split sizes
// convert implicitly to SymInt; IntArrayRef lists widen via fromIntArrayRefSlow.
::std::vector<at::Tensor> unsafe_split(const at::Tensor & self, int64_t split_size, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_Tensor_unsafe_split(self, split_size, dim);
}
::std::vector<at::Tensor> unsafe_split_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_Tensor_unsafe_split(self, split_size, dim);
}
void unsafe_split_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_Tensor_out_unsafe_split_out(self, split_size, dim, out);
}
void unsafe_split_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
    return wrapper_CompositeExplicitAutograd_Tensor_out_unsafe_split_out(self, split_size, dim, out);
}
void unsafe_split_symint_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_Tensor_out_unsafe_split_out(self, split_size, dim, out);
}
void unsafe_split_symint_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
    return wrapper_CompositeExplicitAutograd_Tensor_out_unsafe_split_out(self, split_size, dim, out);
}
::std::vector<at::Tensor> split(const at::Tensor & self, int64_t split_size, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_Tensor_split(self, split_size, dim);
}
::std::vector<at::Tensor> split_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_Tensor_split(self, split_size, dim);
}
::std::vector<at::Tensor> unsafe_split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim) {
    return wrapper_CompositeExplicitAutograd__unsafe_split_with_sizes(self, c10::fromIntArrayRefSlow(split_sizes), dim);
}
::std::vector<at::Tensor> unsafe_split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    return wrapper_CompositeExplicitAutograd__unsafe_split_with_sizes(self, split_sizes, dim);
}
void unsafe_split_with_sizes_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_out_unsafe_split_with_sizes_out(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
}
void unsafe_split_with_sizes_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    return wrapper_CompositeExplicitAutograd_out_unsafe_split_with_sizes_out(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
}
void unsafe_split_with_sizes_symint_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_out_unsafe_split_with_sizes_out(self, split_sizes, dim, out);
}
void unsafe_split_with_sizes_symint_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    return wrapper_CompositeExplicitAutograd_out_unsafe_split_with_sizes_out(self, split_sizes, dim, out);
}
::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim) {
    return wrapper_CompositeExplicitAutograd__split_with_sizes(self, c10::fromIntArrayRefSlow(split_sizes), dim);
}
::std::vector<at::Tensor> split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    return wrapper_CompositeExplicitAutograd__split_with_sizes(self, split_sizes, dim);
}
// Generated shims for aten::squeeze (no-dim, .dim, .dims, plus in-place
// `squeeze_` forms) and aten::stack / aten::_stack.
at::Tensor squeeze(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__squeeze(self);
}
at::Tensor & squeeze_(at::Tensor & self) {
    return wrapper_CompositeExplicitAutograd__squeeze_(self);
}
at::Tensor squeeze(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_dim_squeeze(self, dim);
}
at::Tensor & squeeze_(at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_dim_squeeze_(self, dim);
}
at::Tensor squeeze(const at::Tensor & self, at::IntArrayRef dim) {
    return wrapper_CompositeExplicitAutograd_dims_squeeze(self, dim);
}
at::Tensor & squeeze_(at::Tensor & self, at::IntArrayRef dim) {
    return wrapper_CompositeExplicitAutograd_dims_squeeze_(self, dim);
}
// stack / _stack: functional and out variants (`out` first vs. last).
at::Tensor stack(at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeExplicitAutograd__stack(tensors, dim);
}
at::Tensor & stack_out(at::Tensor & out, at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_out_stack_out(tensors, dim, out);
}
at::Tensor & stack_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out_stack_out(tensors, dim, out);
}
at::Tensor _stack(at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeExplicitAutograd___stack(tensors, dim);
}
at::Tensor & _stack_out(at::Tensor & out, at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeExplicitAutograd_out__stack_out(tensors, dim, out);
}
at::Tensor & _stack_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    return wrapper_CompositeExplicitAutograd_out__stack_out(tensors, dim, out);
}
// Reductions: sum (dtype-only overload), std_mean (correction overload,
// two outputs), and prod (dtype-only overload) shims.
at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd__sum(self, dtype);
}
at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd_out_sum_out(self, dtype, out);
}
at::Tensor & sum_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_sum_out(self, dtype, out);
}
::std::tuple<at::Tensor &,at::Tensor &> std_mean_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
return wrapper_CompositeExplicitAutograd_correction_out_std_mean_out(self, dim, correction, keepdim, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> std_mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_correction_out_std_mean_out(self, dim, correction, keepdim, out0, out1);
}
at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd_out_prod_out(self, dtype, out);
}
at::Tensor & prod_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_prod_out(self, dtype, out);
}
// Transposition shims: t / t_, transpose / transpose_ (int overload), and
// the _mkldnn_transpose out variants.
at::Tensor t(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__t(self);
}
at::Tensor & t_(at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__t_(self);
}
at::Tensor transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
return wrapper_CompositeExplicitAutograd_int_transpose(self, dim0, dim1);
}
at::Tensor & transpose_(at::Tensor & self, int64_t dim0, int64_t dim1) {
return wrapper_CompositeExplicitAutograd__transpose_(self, dim0, dim1);
}
at::Tensor & _mkldnn_transpose_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
return wrapper_CompositeExplicitAutograd_out__mkldnn_transpose_out(self, dim0, dim1, out);
}
at::Tensor & _mkldnn_transpose_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__mkldnn_transpose_out(self, dim0, dim1, out);
}
// Axis-manipulation shims: flip, roll, and rot90 (functional + out variants).
at::Tensor & flip_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) {
return wrapper_CompositeExplicitAutograd_out_flip_out(self, dims, out);
}
at::Tensor & flip_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_flip_out(self, dims, out);
}
at::Tensor & roll_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
return wrapper_CompositeExplicitAutograd_out_roll_out(self, shifts, dims, out);
}
at::Tensor & roll_outf(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_roll_out(self, shifts, dims, out);
}
at::Tensor rot90(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
return wrapper_CompositeExplicitAutograd__rot90(self, k, dims);
}
at::Tensor & rot90_out(at::Tensor & out, const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
return wrapper_CompositeExplicitAutograd_out_rot90_out(self, k, dims, out);
}
at::Tensor & rot90_outf(const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_rot90_out(self, k, dims, out);
}
// Transformer / nested-tensor out-variant shims: _transform_bias_rescale_qkv
// (three outputs) and the _nested_* construction/introspection ops.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
return wrapper_CompositeExplicitAutograd_out__transform_bias_rescale_qkv_out(qkv, qkv_bias, num_heads, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_outf(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out__transform_bias_rescale_qkv_out(qkv, qkv_bias, num_heads, out0, out1, out2);
}
at::Tensor & _nested_tensor_from_mask_out(at::Tensor & out, const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
return wrapper_CompositeExplicitAutograd_out__nested_tensor_from_mask_out(t, mask, mask_check, out);
}
at::Tensor & _nested_tensor_from_mask_outf(const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nested_tensor_from_mask_out(t, mask, mask_check, out);
}
at::Tensor & _nested_from_padded_out(at::Tensor & out, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
return wrapper_CompositeExplicitAutograd_out__nested_from_padded_out(padded, cpu_nested_shape_example, fuse_transform_0213, out);
}
at::Tensor & _nested_from_padded_outf(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nested_from_padded_out(padded, cpu_nested_shape_example, fuse_transform_0213, out);
}
at::Tensor & _nested_tensor_size_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__nested_tensor_size_out(self, out);
}
at::Tensor & _nested_tensor_size_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nested_tensor_size_out(self, out);
}
at::Tensor & _nested_tensor_strides_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__nested_tensor_strides_out(self, out);
}
at::Tensor & _nested_tensor_strides_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nested_tensor_strides_out(self, out);
}
at::Tensor & _nested_from_padded_and_nested_example_out(at::Tensor & out, const at::Tensor & padded, const at::Tensor & nt_example) {
return wrapper_CompositeExplicitAutograd_out__nested_from_padded_and_nested_example_out(padded, nt_example, out);
}
at::Tensor & _nested_from_padded_and_nested_example_outf(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nested_from_padded_and_nested_example_out(padded, nt_example, out);
}
at::Tensor & _nested_view_from_buffer_copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
return wrapper_CompositeExplicitAutograd_out__nested_view_from_buffer_copy_out(self, nested_size, nested_strides, offsets, out);
}
at::Tensor & _nested_view_from_buffer_copy_outf(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nested_view_from_buffer_copy_out(self, nested_size, nested_strides, offsets, out);
}
// _trilinear and the unique family (_unique, unique_dim, unique_consecutive,
// unique_dim_consecutive, _unique2) out-variant shims; multi-output ops
// return tuples of the forwarded `outN` references.
at::Tensor & _trilinear_out(at::Tensor & out, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
return wrapper_CompositeExplicitAutograd_out__trilinear_out(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
}
at::Tensor & _trilinear_outf(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__trilinear_out(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
}
::std::tuple<at::Tensor &,at::Tensor &> _unique_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, bool sorted, bool return_inverse) {
return wrapper_CompositeExplicitAutograd_out__unique_out(self, sorted, return_inverse, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _unique_outf(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__unique_out(self, sorted, return_inverse, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
return wrapper_CompositeExplicitAutograd_out_unique_dim_out(self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_outf(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_unique_dim_out(self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {
return wrapper_CompositeExplicitAutograd_out_unique_consecutive_out(self, return_inverse, return_counts, dim, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_outf(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_unique_consecutive_out(self, return_inverse, return_counts, dim, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
return wrapper_CompositeExplicitAutograd_out_unique_dim_consecutive_out(self, dim, return_inverse, return_counts, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_outf(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out_unique_dim_consecutive_out(self, dim, return_inverse, return_counts, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
return wrapper_CompositeExplicitAutograd_out__unique2_out(self, sorted, return_inverse, return_counts, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_outf(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out__unique2_out(self, sorted, return_inverse, return_counts, out0, out1, out2);
}
// _unsafe_view and unsqueeze shims. IntArrayRef overloads widen sizes to
// SymIntArrayRef via c10::fromIntArrayRefSlow; *_symint overloads pass
// through unchanged.
at::Tensor _unsafe_view(const at::Tensor & self, at::IntArrayRef size) {
return wrapper_CompositeExplicitAutograd___unsafe_view(self, c10::fromIntArrayRefSlow(size));
}
at::Tensor _unsafe_view_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
return wrapper_CompositeExplicitAutograd___unsafe_view(self, size);
}
at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
return wrapper_CompositeExplicitAutograd_out__unsafe_view_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & _unsafe_view_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__unsafe_view_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & _unsafe_view_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
return wrapper_CompositeExplicitAutograd_out__unsafe_view_out(self, size, out);
}
at::Tensor & _unsafe_view_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__unsafe_view_out(self, size, out);
}
at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutograd__unsqueeze(self, dim);
}
at::Tensor & unsqueeze_(at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutograd__unsqueeze_(self, dim);
}
// var_mean (correction overload) and _weight_norm_interface forward/backward
// two-output out-variant shims.
::std::tuple<at::Tensor &,at::Tensor &> var_mean_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
return wrapper_CompositeExplicitAutograd_correction_out_var_mean_out(self, dim, correction, keepdim, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> var_mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_correction_out_var_mean_out(self, dim, correction, keepdim, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & v, const at::Tensor & g, int64_t dim) {
return wrapper_CompositeExplicitAutograd_out__weight_norm_interface_out(v, g, dim, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_outf(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__weight_norm_interface_out(v, g, dim, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
return wrapper_CompositeExplicitAutograd_out__weight_norm_interface_backward_out(grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_outf(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__weight_norm_interface_backward_out(grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
}
10690 | at::Tensor zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options) { |
10691 | return wrapper_CompositeExplicitAutograd_names_zeros(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10692 | } |
10693 | at::Tensor zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10694 | return wrapper_CompositeExplicitAutograd_names_zeros(size, names, dtype, layout, device, pin_memory); |
10695 | } |
10696 | at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) { |
10697 | return wrapper_CompositeExplicitAutograd_names_out_zeros_out(size, names, out); |
10698 | } |
10699 | at::Tensor & zeros_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
10700 | return wrapper_CompositeExplicitAutograd_names_out_zeros_out(size, names, out); |
10701 | } |
10702 | at::Tensor & _efficientzerotensor_out(at::Tensor & out, at::IntArrayRef size) { |
10703 | return wrapper_CompositeExplicitAutograd_out__efficientzerotensor_out(size, out); |
10704 | } |
10705 | at::Tensor & _efficientzerotensor_outf(at::IntArrayRef size, at::Tensor & out) { |
10706 | return wrapper_CompositeExplicitAutograd_out__efficientzerotensor_out(size, out); |
10707 | } |
10708 | at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options) { |
10709 | return wrapper_CompositeExplicitAutograd__zeros(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10710 | } |
10711 | at::Tensor zeros(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10712 | return wrapper_CompositeExplicitAutograd__zeros(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); |
10713 | } |
10714 | at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options) { |
10715 | return wrapper_CompositeExplicitAutograd__zeros(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
10716 | } |
10717 | at::Tensor zeros_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
10718 | return wrapper_CompositeExplicitAutograd__zeros(size, dtype, layout, device, pin_memory); |
10719 | } |
10720 | at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) { |
10721 | return wrapper_CompositeExplicitAutograd_out_zeros_out(c10::fromIntArrayRefSlow(size), out); |
10722 | } |
10723 | at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) { |
10724 | return wrapper_CompositeExplicitAutograd_out_zeros_out(c10::fromIntArrayRefSlow(size), out); |
10725 | } |
10726 | at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size) { |
10727 | return wrapper_CompositeExplicitAutograd_out_zeros_out(size, out); |
10728 | } |
10729 | at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) { |
10730 | return wrapper_CompositeExplicitAutograd_out_zeros_out(size, out); |
10731 | } |
// zeros_like shims. The TensorOptions overload additionally reconciles the
// options bundle with the explicit memory_format argument via
// check_tensor_options_and_extract_memory_format.
at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__zeros_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor zeros_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__zeros_like(self, dtype, layout, device, pin_memory, memory_format);
}
at::Tensor & zeros_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out_zeros_like_out(self, memory_format, out);
}
at::Tensor & zeros_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_zeros_like_out(self, memory_format, out);
}
// Distribution-sampling out-variant shims: _standard_gamma(_grad),
// _dirichlet_grad, _sample_dirichlet, poisson, and binomial. Sampling ops
// take an optional Generator for randomness.
at::Tensor & _standard_gamma_grad_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & output) {
return wrapper_CompositeExplicitAutograd_out__standard_gamma_grad_out(self, output, out);
}
at::Tensor & _standard_gamma_grad_outf(const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__standard_gamma_grad_out(self, output, out);
}
at::Tensor & _standard_gamma_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out__standard_gamma_out(self, generator, out);
}
at::Tensor & _standard_gamma_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__standard_gamma_out(self, generator, out);
}
at::Tensor & _dirichlet_grad_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
return wrapper_CompositeExplicitAutograd_out__dirichlet_grad_out(x, alpha, total, out);
}
at::Tensor & _dirichlet_grad_outf(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__dirichlet_grad_out(x, alpha, total, out);
}
at::Tensor & _sample_dirichlet_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out__sample_dirichlet_out(self, generator, out);
}
at::Tensor & _sample_dirichlet_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sample_dirichlet_out(self, generator, out);
}
at::Tensor & poisson_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_poisson_out(self, generator, out);
}
at::Tensor & poisson_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_poisson_out(self, generator, out);
}
at::Tensor & binomial_out(at::Tensor & out, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_binomial_out(count, prob, generator, out);
}
at::Tensor & binomial_outf(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_binomial_out(count, prob, generator, out);
}
// native_norm out-variant shims: the Scalar-p overload and the
// ScalarOpt_dim_dtype overload (optional p, reduction dims, keepdim, dtype).
at::Tensor & native_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p) {
return wrapper_CompositeExplicitAutograd_out_native_norm_out(self, p, out);
}
at::Tensor & native_norm_outf(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_native_norm_out(self, p, out);
}
at::Tensor & native_norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd_ScalarOpt_dim_dtype_out_native_norm_out(self, p, dim, keepdim, dtype, out);
}
at::Tensor & native_norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_ScalarOpt_dim_dtype_out_native_norm_out(self, p, dim, keepdim, dtype, out);
}
// Sparse reduction shims: _sparse_sum (dim overload + out variants),
// _sparse_sum_backward, and the CSR sum/prod out variants.
at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim) {
return wrapper_CompositeExplicitAutograd_dim__sparse_sum(self, dim);
}
at::Tensor & _sparse_sum_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) {
return wrapper_CompositeExplicitAutograd_dim_out__sparse_sum_out(self, dim, out);
}
at::Tensor & _sparse_sum_outf(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_dim_out__sparse_sum_out(self, dim, out);
}
at::Tensor & _sparse_sum_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
return wrapper_CompositeExplicitAutograd_out__sparse_sum_backward_out(grad, self, dim, out);
}
at::Tensor & _sparse_sum_backward_outf(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_sum_backward_out(grad, self, dim, out);
}
at::Tensor & _sparse_csr_sum_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd_dim_dtype_out__sparse_csr_sum_out(self, dim, keepdim, dtype, out);
}
at::Tensor & _sparse_csr_sum_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_dim_dtype_out__sparse_csr_sum_out(self, dim, keepdim, dtype, out);
}
at::Tensor & _sparse_csr_prod_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd_dim_dtype_out__sparse_csr_prod_out(self, dim, keepdim, dtype, out);
}
at::Tensor & _sparse_csr_prod_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_dim_dtype_out__sparse_csr_prod_out(self, dim, keepdim, dtype, out);
}
// Sparse softmax/log_softmax forward+backward out-variant shims, plus
// _spdiags (builds a matrix from diagonals; layout optional).
at::Tensor & _sparse_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
return wrapper_CompositeExplicitAutograd_out__sparse_softmax_out(self, dim, half_to_float, out);
}
at::Tensor & _sparse_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_softmax_out(self, dim, half_to_float, out);
}
at::Tensor & _sparse_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__sparse_softmax_backward_data_out(grad_output, output, dim, self, out);
}
at::Tensor & _sparse_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_softmax_backward_data_out(grad_output, output, dim, self, out);
}
at::Tensor & _sparse_log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
return wrapper_CompositeExplicitAutograd_out__sparse_log_softmax_out(self, dim, half_to_float, out);
}
at::Tensor & _sparse_log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_log_softmax_out(self, dim, half_to_float, out);
}
at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__sparse_log_softmax_backward_data_out(grad_output, output, dim, self, out);
}
at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_log_softmax_backward_data_out(grad_output, output, dim, self, out);
}
at::Tensor & _spdiags_out(at::Tensor & out, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
return wrapper_CompositeExplicitAutograd_out__spdiags_out(diagonals, offsets, shape, layout, out);
}
at::Tensor & _spdiags_outf(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__spdiags_out(diagonals, offsets, shape, layout, out);
}
// norm shims: ScalarOpt_dtype overload (optional p, explicit result dtype)
// and the plain Scalar-p overload, each with functional and out variants.
at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
return wrapper_CompositeExplicitAutograd_ScalarOpt_dtype_norm(self, p, dtype);
}
at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
return wrapper_CompositeExplicitAutograd_ScalarOpt_dtype_out_norm_out(self, p, dtype, out);
}
at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_ScalarOpt_dtype_out_norm_out(self, p, dtype, out);
}
at::Tensor norm(const at::Tensor & self, const at::Scalar & p) {
return wrapper_CompositeExplicitAutograd_Scalar_norm(self, p);
}
at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p) {
return wrapper_CompositeExplicitAutograd_Scalar_out_norm_out(self, p, out);
}
at::Tensor & norm_outf(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_norm_out(self, p, out);
}
// frexp, clone, and resize_as shims. The resize_as out/in-place variants
// take and return `const at::Tensor &` — matching the native_functions
// schema for resize-style ops, which return their (const-ref) argument.
::std::tuple<at::Tensor,at::Tensor> frexp(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_Tensor_frexp(self);
}
at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__clone(self, memory_format);
}
at::Tensor & clone_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out_clone_out(self, memory_format, out);
}
at::Tensor & clone_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_clone_out(self, memory_format, out);
}
at::Tensor resize_as(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__resize_as(self, the_template, memory_format);
}
const at::Tensor & resize_as_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out_resize_as_out(self, the_template, memory_format, out);
}
const at::Tensor & resize_as_outf(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_resize_as_out(self, the_template, memory_format, out);
}
const at::Tensor & resize_as_(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd__resize_as_(self, the_template, memory_format);
}
10891 | at::Tensor resize_as_sparse(const at::Tensor & self, const at::Tensor & the_template) { |
10892 | return wrapper_CompositeExplicitAutograd__resize_as_sparse(self, the_template); |
10893 | } |
10894 | const at::Tensor & resize_as_sparse_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & the_template) { |
10895 | return wrapper_CompositeExplicitAutograd_out_resize_as_sparse_out(self, the_template, out); |
10896 | } |
10897 | const at::Tensor & resize_as_sparse_outf(const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) { |
10898 | return wrapper_CompositeExplicitAutograd_out_resize_as_sparse_out(self, the_template, out); |
10899 | } |
10900 | at::Tensor zero(const at::Tensor & self) { |
10901 | return wrapper_CompositeExplicitAutograd__zero(self); |
10902 | } |
10903 | at::Tensor & zero_out(at::Tensor & out, const at::Tensor & self) { |
10904 | return wrapper_CompositeExplicitAutograd_out_zero_out(self, out); |
10905 | } |
10906 | at::Tensor & zero_outf(const at::Tensor & self, at::Tensor & out) { |
10907 | return wrapper_CompositeExplicitAutograd_out_zero_out(self, out); |
10908 | } |
// Generated redispatch shims: sub.Scalar, rsub (Tensor and Scalar overloads),
// and _sparse_addmm. `*_out` puts `out` first, `*_outf` puts it last; both hit
// the same wrapper. Do not edit by hand.
at::Tensor sub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_Scalar_sub(self, other, alpha);
}
at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_Scalar_out_sub_out(self, other, alpha, out);
}
at::Tensor & sub_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_sub_out(self, other, alpha, out);
}
at::Tensor & sub_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_Scalar_sub_(self, other, alpha);
}
// rsub.Tensor out variants (other is a Tensor here).
at::Tensor & rsub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_Tensor_out_rsub_out(self, other, alpha, out);
}
at::Tensor & rsub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_out_rsub_out(self, other, alpha, out);
}
// rsub.Scalar variants (other is a Scalar here).
at::Tensor rsub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_Scalar_rsub(self, other, alpha);
}
at::Tensor & rsub_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_Scalar_out_rsub_out(self, other, alpha, out);
}
at::Tensor & rsub_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_rsub_out(self, other, alpha, out);
}
at::Tensor _sparse_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd___sparse_addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & _sparse_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_out__sparse_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & _sparse_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_addmm_out(self, mat1, mat2, beta, alpha, out);
}
// Generated redispatch shims: sparse_coo_tensor factories. The TensorOptions
// overload unpacks `options` into the four optional components the wrapper
// expects (dtype via optTypeMetaToScalarType, layout, device, pinned-memory);
// the explicit-optionals overload passes them straight through.
at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_size_sparse_coo_tensor(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_coo_tensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_size_sparse_coo_tensor(size, dtype, layout, device, pin_memory);
}
at::Tensor & sparse_coo_tensor_out(at::Tensor & out, at::IntArrayRef size) {
return wrapper_CompositeExplicitAutograd_size_out_sparse_coo_tensor_out(size, out);
}
at::Tensor & sparse_coo_tensor_outf(at::IntArrayRef size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_size_out_sparse_coo_tensor_out(size, out);
}
at::Tensor & _sparse_coo_tensor_with_dims_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size) {
return wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_out(sparse_dim, dense_dim, size, out);
}
at::Tensor & _sparse_coo_tensor_with_dims_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_out(sparse_dim, dense_dim, size, out);
}
// The int overloads convert `size` to SymInt via c10::fromIntArrayRefSlow; the
// *_symint_* overloads already take c10::SymIntArrayRef and pass it through.
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values) {
return wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_and_tensors_out(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, out);
}
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_and_tensors_out(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, out);
}
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values) {
return wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_and_tensors_out(sparse_dim, dense_dim, size, indices, values, out);
}
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_coo_tensor_with_dims_and_tensors_out(sparse_dim, dense_dim, size, indices, values, out);
}
// Generated redispatch shims: sparse_resize(_and_clear), sparse_mask,
// _to_dense, _coalesce. Resize-style out variants use `const at::Tensor &`
// for `out` per the generated schema. Do not edit by hand.
at::Tensor sparse_resize(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
return wrapper_CompositeExplicitAutograd__sparse_resize(self, size, sparse_dim, dense_dim);
}
const at::Tensor & sparse_resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
return wrapper_CompositeExplicitAutograd_out_sparse_resize_out(self, size, sparse_dim, dense_dim, out);
}
const at::Tensor & sparse_resize_outf(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_sparse_resize_out(self, size, sparse_dim, dense_dim, out);
}
at::Tensor sparse_resize_and_clear(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
return wrapper_CompositeExplicitAutograd__sparse_resize_and_clear(self, size, sparse_dim, dense_dim);
}
const at::Tensor & sparse_resize_and_clear_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
return wrapper_CompositeExplicitAutograd_out_sparse_resize_and_clear_out(self, size, sparse_dim, dense_dim, out);
}
const at::Tensor & sparse_resize_and_clear_outf(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_sparse_resize_and_clear_out(self, size, sparse_dim, dense_dim, out);
}
at::Tensor & sparse_mask_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) {
return wrapper_CompositeExplicitAutograd_out_sparse_mask_out(self, mask, out);
}
at::Tensor & sparse_mask_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_sparse_mask_out(self, mask, out);
}
at::Tensor & _to_dense_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd_out__to_dense_out(self, dtype, out);
}
at::Tensor & _to_dense_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__to_dense_out(self, dtype, out);
}
at::Tensor & _coalesce_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__coalesce_out(self, out);
}
at::Tensor & _coalesce_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__coalesce_out(self, out);
}
// Generated redispatch shims: coalesce state, sparse index/value accessors,
// and copy_sparse_to_sparse. Do not edit by hand.
bool is_coalesced(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__is_coalesced(self);
}
at::Tensor _coalesced(const at::Tensor & self, bool coalesced) {
return wrapper_CompositeExplicitAutograd___coalesced(self, coalesced);
}
at::Tensor & _coalesced_out(at::Tensor & out, const at::Tensor & self, bool coalesced) {
return wrapper_CompositeExplicitAutograd_out__coalesced_out(self, coalesced, out);
}
at::Tensor & _coalesced_outf(const at::Tensor & self, bool coalesced, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__coalesced_out(self, coalesced, out);
}
// COO / CSR / CSC accessors — each forwards `self` to its wrapper.
at::Tensor indices(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__indices(self);
}
at::Tensor values(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__values(self);
}
at::Tensor crow_indices(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__crow_indices(self);
}
at::Tensor col_indices(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__col_indices(self);
}
at::Tensor ccol_indices(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__ccol_indices(self);
}
at::Tensor row_indices(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__row_indices(self);
}
at::Tensor copy_sparse_to_sparse(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
return wrapper_CompositeExplicitAutograd__copy_sparse_to_sparse(self, src, non_blocking);
}
at::Tensor & copy_sparse_to_sparse_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
return wrapper_CompositeExplicitAutograd_out_copy_sparse_to_sparse_out(self, src, non_blocking, out);
}
at::Tensor & copy_sparse_to_sparse_outf(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_copy_sparse_to_sparse_out(self, src, non_blocking, out);
}
// Generated redispatch shims: unbind, the to_sparse* conversion family
// (two to_sparse overloads: by sparse_dim, and by layout/blocksize/dense_dim),
// and to_mkldnn. Do not edit by hand.
::std::vector<at::Tensor> unbind(const at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutograd_int_unbind(self, dim);
}
at::Tensor & to_sparse_out(at::Tensor & out, const at::Tensor & self, int64_t sparse_dim) {
return wrapper_CompositeExplicitAutograd_sparse_dim_out_to_sparse_out(self, sparse_dim, out);
}
at::Tensor & to_sparse_outf(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_sparse_dim_out_to_sparse_out(self, sparse_dim, out);
}
at::Tensor & to_sparse_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_out(self, layout, blocksize, dense_dim, out);
}
at::Tensor & to_sparse_outf(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_out(self, layout, blocksize, dense_dim, out);
}
at::Tensor & to_sparse_csr_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dense_dim) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_csr_out(self, dense_dim, out);
}
at::Tensor & to_sparse_csr_outf(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_csr_out(self, dense_dim, out);
}
at::Tensor & to_sparse_csc_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dense_dim) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_csc_out(self, dense_dim, out);
}
at::Tensor & to_sparse_csc_outf(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_csc_out(self, dense_dim, out);
}
at::Tensor & to_sparse_bsr_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_bsr_out(self, blocksize, dense_dim, out);
}
at::Tensor & to_sparse_bsr_outf(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_bsr_out(self, blocksize, dense_dim, out);
}
at::Tensor & to_sparse_bsc_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_bsc_out(self, blocksize, dense_dim, out);
}
at::Tensor & to_sparse_bsc_outf(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_to_sparse_bsc_out(self, blocksize, dense_dim, out);
}
at::Tensor & to_mkldnn_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutograd_out_to_mkldnn_out(self, dtype, out);
}
at::Tensor & to_mkldnn_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_to_mkldnn_out(self, dtype, out);
}
// Generated redispatch shims: mkldnn weight reordering. Note the 2d variant
// additionally takes `input_size`; the 3d variant does not. Do not edit by hand.
at::Tensor & mkldnn_reorder_conv2d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_reorder_conv2d_weight_out(self, padding, stride, dilation, groups, input_size, out);
}
at::Tensor & mkldnn_reorder_conv2d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_reorder_conv2d_weight_out(self, padding, stride, dilation, groups, input_size, out);
}
at::Tensor & mkldnn_reorder_conv3d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_reorder_conv3d_weight_out(self, padding, stride, dilation, groups, out);
}
at::Tensor & mkldnn_reorder_conv3d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_reorder_conv3d_weight_out(self, padding, stride, dilation, groups, out);
}
11107 | at::Tensor & quantize_per_tensor_dynamic_out(at::Tensor & out, const at::Tensor & self, at::ScalarType dtype, bool reduce_range) { |
11108 | return wrapper_CompositeExplicitAutograd_out_quantize_per_tensor_dynamic_out(self, dtype, reduce_range, out); |
11109 | } |
11110 | at::Tensor & quantize_per_tensor_dynamic_outf(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) { |
11111 | return wrapper_CompositeExplicitAutograd_out_quantize_per_tensor_dynamic_out(self, dtype, reduce_range, out); |
11112 | } |
11113 | at::Tensor & quantize_per_tensor_out(at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { |
11114 | return wrapper_CompositeExplicitAutograd_out_quantize_per_tensor_out(self, scale, zero_point, dtype, out); |
11115 | } |
11116 | at::Tensor & quantize_per_tensor_outf(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) { |
11117 | return wrapper_CompositeExplicitAutograd_out_quantize_per_tensor_out(self, scale, zero_point, dtype, out); |
11118 | } |
11119 | at::Tensor & quantize_per_tensor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { |
11120 | return wrapper_CompositeExplicitAutograd_tensor_qparams_out_quantize_per_tensor_out(self, scale, zero_point, dtype, out); |
11121 | } |
11122 | at::Tensor & quantize_per_tensor_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) { |
11123 | return wrapper_CompositeExplicitAutograd_tensor_qparams_out_quantize_per_tensor_out(self, scale, zero_point, dtype, out); |
11124 | } |
11125 | void quantize_per_tensor_out(at::TensorList out, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { |
11126 | return wrapper_CompositeExplicitAutograd_tensors_out_quantize_per_tensor_out(tensors, scales, zero_points, dtype, out); |
11127 | } |
11128 | void quantize_per_tensor_outf(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { |
11129 | return wrapper_CompositeExplicitAutograd_tensors_out_quantize_per_tensor_out(tensors, scales, zero_points, dtype, out); |
11130 | } |
11131 | at::Tensor & quantize_per_channel_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) { |
11132 | return wrapper_CompositeExplicitAutograd_out_quantize_per_channel_out(self, scales, zero_points, axis, dtype, out); |
11133 | } |
11134 | at::Tensor & quantize_per_channel_outf(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) { |
11135 | return wrapper_CompositeExplicitAutograd_out_quantize_per_channel_out(self, scales, zero_points, axis, dtype, out); |
11136 | } |
// Generated redispatch shims: dequantize (single-tensor and TensorList
// overloads), q_per_channel scale/zero-point accessors, and int_repr. Do not
// edit by hand.
at::Tensor & dequantize_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_self_out_dequantize_out(self, out);
}
at::Tensor & dequantize_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_self_out_dequantize_out(self, out);
}
// TensorList overload: returns void, results are written into `out`.
void dequantize_out(at::TensorList out, at::TensorList tensors) {
return wrapper_CompositeExplicitAutograd_tensors_out_dequantize_out(tensors, out);
}
void dequantize_outf(at::TensorList tensors, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_tensors_out_dequantize_out(tensors, out);
}
at::Tensor & q_per_channel_scales_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_q_per_channel_scales_out(self, out);
}
at::Tensor & q_per_channel_scales_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_q_per_channel_scales_out(self, out);
}
at::Tensor & q_per_channel_zero_points_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_q_per_channel_zero_points_out(self, out);
}
at::Tensor & q_per_channel_zero_points_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_q_per_channel_zero_points_out(self, out);
}
at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_int_repr_out(self, out);
}
at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_int_repr_out(self, out);
}
// Generated redispatch shims: _make_per_{tensor,channel}_quantized_tensor,
// the fake-quantize family (cachemask variants return a (result, mask) pair
// via out0/out1), and _fused_moving_avg_obs_fq_helper. Do not edit by hand.
at::Tensor & _make_per_tensor_quantized_tensor_out(at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point) {
return wrapper_CompositeExplicitAutograd_out__make_per_tensor_quantized_tensor_out(self, scale, zero_point, out);
}
at::Tensor & _make_per_tensor_quantized_tensor_outf(const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__make_per_tensor_quantized_tensor_out(self, scale, zero_point, out);
}
at::Tensor & _make_per_channel_quantized_tensor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
return wrapper_CompositeExplicitAutograd_out__make_per_channel_quantized_tensor_out(self, scale, zero_point, axis, out);
}
at::Tensor & _make_per_channel_quantized_tensor_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__make_per_channel_quantized_tensor_out(self, scale, zero_point, axis, out);
}
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
return wrapper_CompositeExplicitAutograd_out_fake_quantize_per_tensor_affine_cachemask_out(self, scale, zero_point, quant_min, quant_max, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_outf(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_fake_quantize_per_tensor_affine_cachemask_out(self, scale, zero_point, quant_min, quant_max, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
return wrapper_CompositeExplicitAutograd_out__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
}
at::Tensor & _fake_quantize_learnable_per_tensor_affine_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
return wrapper_CompositeExplicitAutograd_out__fake_quantize_learnable_per_tensor_affine_out(self, scale, zero_point, quant_min, quant_max, grad_factor, out);
}
at::Tensor & _fake_quantize_learnable_per_tensor_affine_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__fake_quantize_learnable_per_tensor_affine_out(self, scale, zero_point, quant_min, quant_max, grad_factor, out);
}
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
return wrapper_CompositeExplicitAutograd_out_fake_quantize_per_channel_affine_cachemask_out(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out_fake_quantize_per_channel_affine_cachemask_out(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
}
at::Tensor & _fake_quantize_learnable_per_channel_affine_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
return wrapper_CompositeExplicitAutograd_out__fake_quantize_learnable_per_channel_affine_out(self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
}
at::Tensor & _fake_quantize_learnable_per_channel_affine_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__fake_quantize_learnable_per_channel_affine_out(self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
}
// "functional" variant: returns all six results as values instead of mutating
// running_min/running_max/scale/zero_point in place.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
return wrapper_CompositeExplicitAutograd___fused_moving_avg_obs_fq_helper_functional(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
return wrapper_CompositeExplicitAutograd_out__fused_moving_avg_obs_fq_helper_out(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_outf(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__fused_moving_avg_obs_fq_helper_out(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
}
// Forwarding shims for aten::_to_copy. The TensorOptions overload unpacks the
// packed options into the individual optional dtype/layout/device/pin_memory
// fields expected by the wrapper, and reconciles `options` with the explicit
// `memory_format` argument via check_tensor_options_and_extract_memory_format.
// The remaining overloads forward their arguments unchanged.
at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd___to_copy(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor _to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd___to_copy(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
}
at::Tensor & _to_copy_out(at::Tensor & out, const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_CompositeExplicitAutograd_out__to_copy_out(self, non_blocking, memory_format, out);
}
at::Tensor & _to_copy_outf(const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__to_copy_out(self, non_blocking, memory_format, out);
}
// Forwarding shims for the out= variants of aten::_lstm_mps and
// aten::lstm_mps_backward. `_out` takes the out tensors/lists first, `_outf`
// takes them last; both funnel into the same wrapper. The backward variants
// return void because all results are written into out0/out1/out2.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
return wrapper_CompositeExplicitAutograd_out__lstm_mps_out(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_outf(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
return wrapper_CompositeExplicitAutograd_out__lstm_mps_out(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4);
}
void lstm_mps_backward_out(at::Tensor & out0, at::TensorList out1, at::TensorList out2, const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
return wrapper_CompositeExplicitAutograd_out_lstm_mps_backward_out(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
}
void lstm_mps_backward_outf(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
return wrapper_CompositeExplicitAutograd_out_lstm_mps_backward_out(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
}
// Forwarding shims for the out= variants of the fused LSTM/GRU cell ops:
// aten::_thnn_fused_lstm_cell, aten::_thnn_fused_lstm_cell_backward_impl,
// aten::_thnn_fused_gru_cell, aten::_thnn_fused_gru_cell_backward. Each
// _out/_outf pair differs only in the position of the out tensors in the
// signature and delegates to a single shared wrapper.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
return wrapper_CompositeExplicitAutograd_out__thnn_fused_lstm_cell_out(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_outf(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out__thnn_fused_lstm_cell_out(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
return wrapper_CompositeExplicitAutograd_out__thnn_fused_lstm_cell_backward_impl_out(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_outf(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out__thnn_fused_lstm_cell_backward_impl_out(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
return wrapper_CompositeExplicitAutograd_out__thnn_fused_gru_cell_out(input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_outf(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__thnn_fused_gru_cell_out(input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
return wrapper_CompositeExplicitAutograd_out__thnn_fused_gru_cell_backward_out(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_outf(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
return wrapper_CompositeExplicitAutograd_out__thnn_fused_gru_cell_backward_out(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
}
// Forwarding shims for aten::_pack_padded_sequence (functional plus the
// torchgen-generated out-first `_out` and out-last `_outf` spellings), all
// delegating to the shared CompositeExplicitAutograd wrappers.
::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
return wrapper_CompositeExplicitAutograd___pack_padded_sequence(input, lengths, batch_first);
}
::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
return wrapper_CompositeExplicitAutograd_out__pack_padded_sequence_out(input, lengths, batch_first, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_outf(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__pack_padded_sequence_out(input, lengths, batch_first, out0, out1);
}
// Forwarding shims for the overload family of aten::set (functional variants):
//   - set(self, Storage)                               -> source_Storage wrapper
//   - set(self, Storage, offset, size, stride)         -> source_Storage_storage_offset wrapper
//   - set(self, Tensor)                                -> source_Tensor wrapper
//   - set(self)                                        -> no-source wrapper
// plus their _out/_outf spellings. The int64_t/IntArrayRef overloads convert
// their size/stride arguments to SymInt form via c10::fromIntArrayRefSlow
// before calling the (SymInt-taking) wrapper; the `_symint` variants pass
// SymInt arguments through unchanged.
at::Tensor set(const at::Tensor & self, at::Storage source) {
return wrapper_CompositeExplicitAutograd_source_Storage_set(self, source);
}
at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source) {
return wrapper_CompositeExplicitAutograd_source_Storage_out_set_out(self, source, out);
}
at::Tensor & set_outf(const at::Tensor & self, at::Storage source, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_source_Storage_out_set_out(self, source, out);
}
at::Tensor set(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_set(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
}
at::Tensor set_symint(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_set(self, source, storage_offset, size, stride);
}
at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_out_set_out(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
}
at::Tensor & set_outf(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_out_set_out(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
}
at::Tensor & set_symint_out(at::Tensor & out, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_out_set_out(self, source, storage_offset, size, stride, out);
}
at::Tensor & set_symint_outf(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_source_Storage_storage_offset_out_set_out(self, source, storage_offset, size, stride, out);
}
at::Tensor set(const at::Tensor & self, const at::Tensor & source) {
return wrapper_CompositeExplicitAutograd_source_Tensor_set(self, source);
}
at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & source) {
return wrapper_CompositeExplicitAutograd_source_Tensor_out_set_out(self, source, out);
}
at::Tensor & set_outf(const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_source_Tensor_out_set_out(self, source, out);
}
at::Tensor set(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__set(self);
}
at::Tensor & set_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_set_out(self, out);
}
at::Tensor & set_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_set_out(self, out);
}
// Forwarding shims for aten::lift, aten::lift_fresh and the out= variants of
// aten::lift_fresh_copy, each delegating to its CompositeExplicitAutograd
// wrapper (defined elsewhere in this generated file).
at::Tensor lift(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__lift(self);
}
at::Tensor & lift_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_lift_out(self, out);
}
at::Tensor & lift_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_lift_out(self, out);
}
at::Tensor lift_fresh(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__lift_fresh(self);
}
at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_lift_fresh_copy_out(self, out);
}
at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_lift_fresh_copy_out(self, out);
}
// Forwarding shims for aten::masked_fill (Scalar-value and Tensor-value
// overloads) and aten::masked_scatter, plus their _out/_outf spellings. The
// overload is encoded in the wrapper name (Scalar / Tensor); otherwise these
// are verbatim pass-throughs.
at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_Scalar_masked_fill(self, mask, value);
}
at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_Scalar_out_masked_fill_out(self, mask, value, out);
}
at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_masked_fill_out(self, mask, value, out);
}
at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
return wrapper_CompositeExplicitAutograd_Tensor_masked_fill(self, mask, value);
}
at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
return wrapper_CompositeExplicitAutograd_Tensor_out_masked_fill_out(self, mask, value, out);
}
at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_out_masked_fill_out(self, mask, value, out);
}
at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
return wrapper_CompositeExplicitAutograd__masked_scatter(self, mask, source);
}
at::Tensor & masked_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
return wrapper_CompositeExplicitAutograd_out_masked_scatter_out(self, mask, source, out);
}
at::Tensor & masked_scatter_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_masked_scatter_out(self, mask, source, out);
}
// Forwarding shims for the out= variants of aten::_masked_softmax and
// aten::_masked_softmax_backward; _out/_outf differ only in out-parameter
// position and share one wrapper each.
at::Tensor & _masked_softmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
return wrapper_CompositeExplicitAutograd_out__masked_softmax_out(self, mask, dim, mask_type, out);
}
at::Tensor & _masked_softmax_outf(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__masked_softmax_out(self, mask, dim, mask_type, out);
}
at::Tensor & _masked_softmax_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
return wrapper_CompositeExplicitAutograd_out__masked_softmax_backward_out(grad_output, output, mask, dim, out);
}
at::Tensor & _masked_softmax_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__masked_softmax_backward_out(grad_output, output, mask, dim, out);
}
// Forwarding shims for the dtype overload of aten::view and for aten::put
// (functional and out= variants), each a verbatim pass-through to its wrapper.
at::Tensor view(const at::Tensor & self, at::ScalarType dtype) {
return wrapper_CompositeExplicitAutograd_dtype_view(self, dtype);
}
at::Tensor put(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
return wrapper_CompositeExplicitAutograd__put(self, index, source, accumulate);
}
at::Tensor & put_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
return wrapper_CompositeExplicitAutograd_out_put_out(self, index, source, accumulate, out);
}
at::Tensor & put_outf(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_put_out(self, index, source, accumulate, out);
}
// Forwarding shims for aten::index_fill: int_Scalar (Scalar fill value) and
// int_Tensor (Tensor fill value) overloads, each with _out/_outf spellings
// delegating to a shared wrapper.
at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_int_Scalar_index_fill(self, dim, index, value);
}
at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_int_Scalar_out_index_fill_out(self, dim, index, value, out);
}
at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_int_Scalar_out_index_fill_out(self, dim, index, value, out);
}
at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
return wrapper_CompositeExplicitAutograd_int_Tensor_index_fill(self, dim, index, value);
}
at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
return wrapper_CompositeExplicitAutograd_int_Tensor_out_index_fill_out(self, dim, index, value, out);
}
at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_int_Tensor_out_index_fill_out(self, dim, index, value, out);
}
// Forwarding shims for aten::bitwise_and / bitwise_or / bitwise_xor. Two
// overload families appear here: Scalar (Tensor self, Scalar other) and
// Scalar_Tensor (Scalar self, Tensor other), each with _out/_outf spellings.
// All are verbatim pass-throughs to the wrapper named after the overload.
at::Tensor bitwise_and(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_bitwise_and(self, other);
}
at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_and_out(self, other, out);
}
at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_and_out(self, other, out);
}
at::Tensor bitwise_and(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_and(self, other);
}
at::Tensor & bitwise_and_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_and_out(self, other, out);
}
at::Tensor & bitwise_and_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_and_out(self, other, out);
}
at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_or_out(self, other, out);
}
at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_or_out(self, other, out);
}
at::Tensor bitwise_or(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_or(self, other);
}
at::Tensor & bitwise_or_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_or_out(self, other, out);
}
at::Tensor & bitwise_or_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_or_out(self, other, out);
}
at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_xor_out(self, other, out);
}
at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_bitwise_xor_out(self, other, out);
}
at::Tensor bitwise_xor(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_xor(self, other);
}
at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_xor_out(self, other, out);
}
at::Tensor & bitwise_xor_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_xor_out(self, other, out);
}
// Forwarding shims for the shift operators: aten::__lshift__ / __rshift__
// (Scalar and Tensor `other` overloads, out= only here) and
// aten::bitwise_left_shift / bitwise_right_shift (Tensor_Scalar and
// Scalar_Tensor overloads, including in-place `_` variants). Each shim is a
// verbatim pass-through to the wrapper whose name encodes the overload.
at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out___lshift___out(self, other, out);
}
at::Tensor & __lshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out___lshift___out(self, other, out);
}
at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Tensor_out___lshift___out(self, other, out);
}
at::Tensor & __lshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_out___lshift___out(self, other, out);
}
at::Tensor bitwise_left_shift(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_left_shift(self, other);
}
at::Tensor & bitwise_left_shift_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Tensor_Scalar_out_bitwise_left_shift_out(self, other, out);
}
at::Tensor & bitwise_left_shift_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_Scalar_out_bitwise_left_shift_out(self, other, out);
}
at::Tensor & bitwise_left_shift_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_left_shift_(self, other);
}
at::Tensor bitwise_left_shift(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_left_shift(self, other);
}
at::Tensor & bitwise_left_shift_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_left_shift_out(self, other, out);
}
at::Tensor & bitwise_left_shift_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_left_shift_out(self, other, out);
}
at::Tensor & __rshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out___rshift___out(self, other, out);
}
at::Tensor & __rshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out___rshift___out(self, other, out);
}
at::Tensor & __rshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Tensor_out___rshift___out(self, other, out);
}
at::Tensor & __rshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_out___rshift___out(self, other, out);
}
at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_right_shift(self, other);
}
at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Tensor_Scalar_out_bitwise_right_shift_out(self, other, out);
}
at::Tensor & bitwise_right_shift_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_Scalar_out_bitwise_right_shift_out(self, other, out);
}
at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Tensor_Scalar_bitwise_right_shift_(self, other);
}
at::Tensor bitwise_right_shift(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_bitwise_right_shift(self, other);
}
at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_right_shift_out(self, other, out);
}
at::Tensor & bitwise_right_shift_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_bitwise_right_shift_out(self, other, out);
}
// Forwarding shims for the functional variants of aten::random: the `from`
// overload (from/to range), the `to` overload (upper bound only), and the
// no-bound overload, each with _out/_outf spellings delegating to the wrapper
// named after the overload.
at::Tensor random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_from_random(self, from, to, generator);
}
at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_from_out_random_out(self, from, to, generator, out);
}
at::Tensor & random_outf(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_from_out_random_out(self, from, to, generator, out);
}
at::Tensor random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_to_random(self, to, generator);
}
at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_to_out_random_out(self, to, generator, out);
}
at::Tensor & random_outf(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_to_out_random_out(self, to, generator, out);
}
at::Tensor random(const at::Tensor & self, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd__random(self, generator);
}
at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_random_out(self, generator, out);
}
at::Tensor & random_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_random_out(self, generator, out);
}
// Forwarding shims for the functional variants of the in-place sampling ops:
// aten::uniform, aten::cauchy, aten::log_normal, aten::exponential and
// aten::geometric, each with _out/_outf spellings. All take an optional
// Generator and pass every argument through to the shared wrapper unchanged.
at::Tensor uniform(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd__uniform(self, from, to, generator);
}
at::Tensor & uniform_out(at::Tensor & out, const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_uniform_out(self, from, to, generator, out);
}
at::Tensor & uniform_outf(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_uniform_out(self, from, to, generator, out);
}
at::Tensor cauchy(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd__cauchy(self, median, sigma, generator);
}
at::Tensor & cauchy_out(at::Tensor & out, const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_cauchy_out(self, median, sigma, generator, out);
}
at::Tensor & cauchy_outf(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_cauchy_out(self, median, sigma, generator, out);
}
at::Tensor log_normal(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd__log_normal(self, mean, std, generator);
}
at::Tensor & log_normal_out(at::Tensor & out, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_log_normal_out(self, mean, std, generator, out);
}
at::Tensor & log_normal_outf(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_log_normal_out(self, mean, std, generator, out);
}
at::Tensor exponential(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd__exponential(self, lambd, generator);
}
at::Tensor & exponential_out(at::Tensor & out, const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_exponential_out(self, lambd, generator, out);
}
at::Tensor & exponential_outf(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_exponential_out(self, lambd, generator, out);
}
at::Tensor geometric(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd__geometric(self, p, generator);
}
at::Tensor & geometric_out(at::Tensor & out, const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_geometric_out(self, p, generator, out);
}
at::Tensor & geometric_outf(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_geometric_out(self, p, generator, out);
}
// Generated out/outf forwarders for tril_indices, triu_indices and trace.
// These take no `self` tensor (indices are built from row/col/offset alone)
// and delegate to the CompositeExplicitAutograd wrappers defined earlier in
// this file.
at::Tensor & tril_indices_out(at::Tensor & out, int64_t row, int64_t col, int64_t offset) {
return wrapper_CompositeExplicitAutograd_out_tril_indices_out(row, col, offset, out);
}
at::Tensor & tril_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_tril_indices_out(row, col, offset, out);
}
at::Tensor & triu_indices_out(at::Tensor & out, int64_t row, int64_t col, int64_t offset) {
return wrapper_CompositeExplicitAutograd_out_triu_indices_out(row, col, offset, out);
}
at::Tensor & triu_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_triu_indices_out(row, col, offset, out);
}
at::Tensor & trace_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_trace_out(self, out);
}
at::Tensor & trace_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_trace_out(self, out);
}
// Generated forwarders for linear-algebra helpers.  _linalg_check_errors
// returns void (error reporting only); the cholesky_solve entries follow the
// usual functional / out-first / out-last pattern.
void _linalg_check_errors(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
return wrapper_CompositeExplicitAutograd___linalg_check_errors(info, api_name, is_matrix);
}
at::Tensor cholesky_solve(const at::Tensor & self, const at::Tensor & input2, bool upper) {
return wrapper_CompositeExplicitAutograd__cholesky_solve(self, input2, upper);
}
at::Tensor & cholesky_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, bool upper) {
return wrapper_CompositeExplicitAutograd_out_cholesky_solve_out(self, input2, upper, out);
}
at::Tensor & cholesky_solve_outf(const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_cholesky_solve_out(self, input2, upper, out);
}
at::Tensor & _cholesky_solve_helper_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & A, bool upper) {
return wrapper_CompositeExplicitAutograd_out__cholesky_solve_helper_out(self, A, upper, out);
}
at::Tensor & _cholesky_solve_helper_outf(const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__cholesky_solve_helper_out(self, A, upper, out);
}
// Generated forwarders for polygamma_ (in-place), dist, and the internal
// histogramdd helpers.  All delegate to CompositeExplicitAutograd wrappers
// defined earlier in this file; the histogramdd bin-edges variants return
// void and write into a TensorList `out`.
at::Tensor & polygamma_(at::Tensor & self, int64_t n) {
return wrapper_CompositeExplicitAutograd__polygamma_(self, n);
}
at::Tensor dist(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
return wrapper_CompositeExplicitAutograd__dist(self, other, p);
}
at::Tensor & dist_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
return wrapper_CompositeExplicitAutograd_out_dist_out(self, other, p, out);
}
at::Tensor & dist_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_dist_out(self, other, p, out);
}
void _histogramdd_bin_edges_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CompositeExplicitAutograd_out__histogramdd_bin_edges_out(self, bins, range, weight, density, out);
}
void _histogramdd_bin_edges_outf(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__histogramdd_bin_edges_out(self, bins, range, weight, density, out);
}
at::Tensor & _histogramdd_from_bin_cts_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CompositeExplicitAutograd_out__histogramdd_from_bin_cts_out(self, bins, range, weight, density, out);
}
at::Tensor & _histogramdd_from_bin_cts_outf(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__histogramdd_from_bin_cts_out(self, bins, range, weight, density, out);
}
at::Tensor & _histogramdd_from_bin_tensors_out(at::Tensor & out, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CompositeExplicitAutograd_out__histogramdd_from_bin_tensors_out(self, bins, weight, density, out);
}
at::Tensor & _histogramdd_from_bin_tensors_outf(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__histogramdd_from_bin_tensors_out(self, bins, weight, density, out);
}
// Generated forwarders for the Scalar overloads of fmod and remainder
// (functional, out, outf, and in-place), plus the Scalar-self/Tensor-other
// remainder.out overload at the end.  Note the last two overloads take a
// Scalar `self` and a Tensor `other` and dispatch to a distinct
// Scalar_Tensor wrapper.
at::Tensor fmod(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_fmod(self, other);
}
at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out_fmod_out(self, other, out);
}
at::Tensor & fmod_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_fmod_out(self, other, out);
}
at::Tensor & fmod_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_fmod_(self, other);
}
at::Tensor remainder(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_remainder(self, other);
}
at::Tensor & remainder_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_out_remainder_out(self, other, out);
}
at::Tensor & remainder_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_remainder_out(self, other, out);
}
at::Tensor & remainder_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_Scalar_remainder_(self, other);
}
at::Tensor & remainder_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_remainder_out(self, other, out);
}
at::Tensor & remainder_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_Tensor_out_remainder_out(self, other, out);
}
// Generated forwarders for sort (values/indices tuple result) and the
// argsort.stable out variants.  sort_out / sort_outf differ only in whether
// the `values`/`indices` outputs come first or last in the signature.
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, int64_t dim, bool descending) {
return wrapper_CompositeExplicitAutograd__sort(self, dim, descending);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool descending) {
return wrapper_CompositeExplicitAutograd_values_sort_out(self, dim, descending, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
return wrapper_CompositeExplicitAutograd_values_sort_out(self, dim, descending, values, indices);
}
at::Tensor & argsort_out(at::Tensor & out, const at::Tensor & self, bool stable, int64_t dim, bool descending) {
return wrapper_CompositeExplicitAutograd_stable_out_argsort_out(self, stable, dim, descending, out);
}
at::Tensor & argsort_outf(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_stable_out_argsort_out(self, stable, dim, descending, out);
}
// Generated forwarders for unfold_backward.out.  The plain IntArrayRef
// overloads convert `input_sizes` to symbolic ints via
// c10::fromIntArrayRefSlow before calling the SymInt-based wrapper; the
// _symint overloads pass the SymIntArrayRef straight through.
at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
return wrapper_CompositeExplicitAutograd_out_unfold_backward_out(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
}
at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_unfold_backward_out(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
}
at::Tensor & unfold_backward_symint_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
return wrapper_CompositeExplicitAutograd_out_unfold_backward_out(grad_in, input_sizes, dim, size, step, out);
}
at::Tensor & unfold_backward_symint_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_unfold_backward_out(grad_in, input_sizes, dim, size, step, out);
}
// Generated forwarders for the normal-distribution family.  The
// TensorOptions overloads unpack options into the individual
// dtype/layout/device/pin_memory optionals expected by the wrapper; the
// IntArrayRef overloads convert `size` via c10::fromIntArrayRefSlow, while
// the _symint overloads forward the SymIntArrayRef unchanged.
at::Tensor normal_functional(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd__normal_functional(self, mean, std, generator);
}
at::Tensor & normal_out(at::Tensor & out, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_out_normal_out(self, mean, std, generator, out);
}
at::Tensor & normal_outf(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_normal_out(self, mean, std, generator, out);
}
at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_float_float_normal(mean, std, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_float_float_normal(mean, std, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
}
at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd_float_float_normal(mean, std, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_float_float_normal(mean, std, size, generator, dtype, layout, device, pin_memory);
}
at::Tensor & normal_out(at::Tensor & out, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_float_float_out_normal_out(mean, std, c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & normal_outf(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_float_float_out_normal_out(mean, std, c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & normal_symint_out(at::Tensor & out, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutograd_float_float_out_normal_out(mean, std, size, generator, out);
}
at::Tensor & normal_symint_outf(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_float_float_out_normal_out(mean, std, size, generator, out);
}
11749 | at::Tensor alias(const at::Tensor & self) { |
11750 | return wrapper_CompositeExplicitAutograd__alias(self); |
11751 | } |
// Generated forwarders for the automatic-mixed-precision (AMP) helpers:
// the foreach non-finite check/unscale and the loss-scale update.  Note
// `found_inf` / `growth_tracker` are taken as mutable references in the out
// variants, matching the wrapper signatures.
::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
return wrapper_CompositeExplicitAutograd___amp_foreach_non_finite_check_and_unscale(self, found_inf, inv_scale);
}
void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList out, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
return wrapper_CompositeExplicitAutograd_out__amp_foreach_non_finite_check_and_unscale_out(self, found_inf, inv_scale, out);
}
void _amp_foreach_non_finite_check_and_unscale_outf(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__amp_foreach_non_finite_check_and_unscale_out(self, found_inf, inv_scale, out);
}
::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
return wrapper_CompositeExplicitAutograd___amp_update_scale(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}
at::Tensor & _amp_update_scale_out(at::Tensor & out, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
return wrapper_CompositeExplicitAutograd_out__amp_update_scale_out(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
}
at::Tensor & _amp_update_scale_outf(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__amp_update_scale_out(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
}
// Generated forwarders for the _foreach binary ops with a single Scalar
// operand (add, sub, mul, div, clamp_min, clamp_max, maximum, minimum).
// All return void and write results into the TensorList `out`; each
// out/outf pair forwards to the same Scalar_out wrapper.
void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_add_out(self, scalar, out);
}
void _foreach_add_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_add_out(self, scalar, out);
}
void _foreach_sub_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_sub_out(self, scalar, out);
}
void _foreach_sub_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_sub_out(self, scalar, out);
}
void _foreach_mul_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_mul_out(self, scalar, out);
}
void _foreach_mul_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_mul_out(self, scalar, out);
}
void _foreach_div_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_div_out(self, scalar, out);
}
void _foreach_div_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_div_out(self, scalar, out);
}
void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_clamp_min_out(self, scalar, out);
}
void _foreach_clamp_min_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_clamp_min_out(self, scalar, out);
}
void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_clamp_max_out(self, scalar, out);
}
void _foreach_clamp_max_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_clamp_max_out(self, scalar, out);
}
void _foreach_maximum_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_maximum_out(self, scalar, out);
}
void _foreach_maximum_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_maximum_out(self, scalar, out);
}
void _foreach_minimum_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_minimum_out(self, scalar, out);
}
void _foreach_minimum_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_minimum_out(self, scalar, out);
}
// Generated forwarders for the _foreach binary ops taking a TensorList
// `other` (the .List overloads).  add and sub additionally carry an `alpha`
// Scalar; the rest are two-list operations.  All write into the TensorList
// `out` via the List_out wrappers.
void _foreach_add_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_add_out(self, other, alpha, out);
}
void _foreach_add_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_add_out(self, other, alpha, out);
}
void _foreach_sub_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_sub_out(self, other, alpha, out);
}
void _foreach_sub_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_sub_out(self, other, alpha, out);
}
void _foreach_mul_out(at::TensorList out, at::TensorList self, at::TensorList other) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_mul_out(self, other, out);
}
void _foreach_mul_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_mul_out(self, other, out);
}
void _foreach_div_out(at::TensorList out, at::TensorList self, at::TensorList other) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_div_out(self, other, out);
}
void _foreach_div_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_div_out(self, other, out);
}
void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, at::TensorList other) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_clamp_min_out(self, other, out);
}
void _foreach_clamp_min_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_clamp_min_out(self, other, out);
}
void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::TensorList other) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_clamp_max_out(self, other, out);
}
void _foreach_clamp_max_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_clamp_max_out(self, other, out);
}
void _foreach_maximum_out(at::TensorList out, at::TensorList self, at::TensorList other) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_maximum_out(self, other, out);
}
void _foreach_maximum_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_maximum_out(self, other, out);
}
void _foreach_minimum_out(at::TensorList out, at::TensorList self, at::TensorList other) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_minimum_out(self, other, out);
}
void _foreach_minimum_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_minimum_out(self, other, out);
}
// Generated forwarders for the _foreach binary ops taking a per-tensor list
// of Scalars (the .ScalarList overloads).  All return void and write into
// the TensorList `out` via the ScalarList_out wrappers.
void _foreach_add_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_add_out(self, scalars, out);
}
void _foreach_add_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_add_out(self, scalars, out);
}
void _foreach_sub_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_sub_out(self, scalars, out);
}
void _foreach_sub_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_sub_out(self, scalars, out);
}
void _foreach_div_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_div_out(self, scalars, out);
}
void _foreach_div_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_div_out(self, scalars, out);
}
void _foreach_mul_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_mul_out(self, scalars, out);
}
void _foreach_mul_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_mul_out(self, scalars, out);
}
void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_clamp_min_out(self, scalars, out);
}
void _foreach_clamp_min_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_clamp_min_out(self, scalars, out);
}
void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_clamp_max_out(self, scalars, out);
}
void _foreach_clamp_max_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_clamp_max_out(self, scalars, out);
}
void _foreach_maximum_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_maximum_out(self, scalars, out);
}
void _foreach_maximum_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_maximum_out(self, scalars, out);
}
void _foreach_minimum_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_minimum_out(self, scalars, out);
}
void _foreach_minimum_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_minimum_out(self, scalars, out);
}
// Generated forwarders for the _foreach unary ops (exp, zero, sqrt, abs,
// acos, asin, atan, ceil, cos, cosh, erf, erfc, expm1, floor, log, log10,
// log1p, log2).  Each out/outf pair forwards to the same wrapper and writes
// into the TensorList `out`; _foreach_zero also has a functional form
// returning a new vector of tensors.
void _foreach_exp_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_exp_out(self, out);
}
void _foreach_exp_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_exp_out(self, out);
}
::std::vector<at::Tensor> _foreach_zero(at::TensorList self) {
return wrapper_CompositeExplicitAutograd___foreach_zero(self);
}
void _foreach_zero_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_zero_out(self, out);
}
void _foreach_zero_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_zero_out(self, out);
}
void _foreach_sqrt_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_sqrt_out(self, out);
}
void _foreach_sqrt_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_sqrt_out(self, out);
}
void _foreach_abs_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_abs_out(self, out);
}
void _foreach_abs_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_abs_out(self, out);
}
void _foreach_acos_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_acos_out(self, out);
}
void _foreach_acos_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_acos_out(self, out);
}
void _foreach_asin_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_asin_out(self, out);
}
void _foreach_asin_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_asin_out(self, out);
}
void _foreach_atan_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_atan_out(self, out);
}
void _foreach_atan_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_atan_out(self, out);
}
void _foreach_ceil_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_ceil_out(self, out);
}
void _foreach_ceil_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_ceil_out(self, out);
}
void _foreach_cos_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_cos_out(self, out);
}
void _foreach_cos_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_cos_out(self, out);
}
void _foreach_cosh_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_cosh_out(self, out);
}
void _foreach_cosh_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_cosh_out(self, out);
}
void _foreach_erf_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_erf_out(self, out);
}
void _foreach_erf_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_erf_out(self, out);
}
void _foreach_erfc_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_erfc_out(self, out);
}
void _foreach_erfc_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_erfc_out(self, out);
}
void _foreach_expm1_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_expm1_out(self, out);
}
void _foreach_expm1_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_expm1_out(self, out);
}
void _foreach_floor_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_floor_out(self, out);
}
void _foreach_floor_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_floor_out(self, out);
}
void _foreach_log_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_log_out(self, out);
}
void _foreach_log_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_log_out(self, out);
}
void _foreach_log10_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_log10_out(self, out);
}
void _foreach_log10_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_log10_out(self, out);
}
void _foreach_log1p_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_log1p_out(self, out);
}
void _foreach_log1p_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_log1p_out(self, out);
}
void _foreach_log2_out(at::TensorList out, at::TensorList self) {
return wrapper_CompositeExplicitAutograd_out__foreach_log2_out(self, out);
}
void _foreach_log2_outf(at::TensorList self, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__foreach_log2_out(self, out);
}
12025 | void _foreach_neg_out(at::TensorList out, at::TensorList self) { |
12026 | return wrapper_CompositeExplicitAutograd_out__foreach_neg_out(self, out); |
12027 | } |
12028 | void _foreach_neg_outf(at::TensorList self, at::TensorList out) { |
12029 | return wrapper_CompositeExplicitAutograd_out__foreach_neg_out(self, out); |
12030 | } |
12031 | void _foreach_tan_out(at::TensorList out, at::TensorList self) { |
12032 | return wrapper_CompositeExplicitAutograd_out__foreach_tan_out(self, out); |
12033 | } |
12034 | void _foreach_tan_outf(at::TensorList self, at::TensorList out) { |
12035 | return wrapper_CompositeExplicitAutograd_out__foreach_tan_out(self, out); |
12036 | } |
12037 | void _foreach_tanh_out(at::TensorList out, at::TensorList self) { |
12038 | return wrapper_CompositeExplicitAutograd_out__foreach_tanh_out(self, out); |
12039 | } |
12040 | void _foreach_tanh_outf(at::TensorList self, at::TensorList out) { |
12041 | return wrapper_CompositeExplicitAutograd_out__foreach_tanh_out(self, out); |
12042 | } |
12043 | void _foreach_sin_out(at::TensorList out, at::TensorList self) { |
12044 | return wrapper_CompositeExplicitAutograd_out__foreach_sin_out(self, out); |
12045 | } |
12046 | void _foreach_sin_outf(at::TensorList self, at::TensorList out) { |
12047 | return wrapper_CompositeExplicitAutograd_out__foreach_sin_out(self, out); |
12048 | } |
12049 | void _foreach_sinh_out(at::TensorList out, at::TensorList self) { |
12050 | return wrapper_CompositeExplicitAutograd_out__foreach_sinh_out(self, out); |
12051 | } |
12052 | void _foreach_sinh_outf(at::TensorList self, at::TensorList out) { |
12053 | return wrapper_CompositeExplicitAutograd_out__foreach_sinh_out(self, out); |
12054 | } |
12055 | void _foreach_round_out(at::TensorList out, at::TensorList self) { |
12056 | return wrapper_CompositeExplicitAutograd_out__foreach_round_out(self, out); |
12057 | } |
12058 | void _foreach_round_outf(at::TensorList self, at::TensorList out) { |
12059 | return wrapper_CompositeExplicitAutograd_out__foreach_round_out(self, out); |
12060 | } |
12061 | void _foreach_lgamma_out(at::TensorList out, at::TensorList self) { |
12062 | return wrapper_CompositeExplicitAutograd_out__foreach_lgamma_out(self, out); |
12063 | } |
12064 | void _foreach_lgamma_outf(at::TensorList self, at::TensorList out) { |
12065 | return wrapper_CompositeExplicitAutograd_out__foreach_lgamma_out(self, out); |
12066 | } |
12067 | void _foreach_frac_out(at::TensorList out, at::TensorList self) { |
12068 | return wrapper_CompositeExplicitAutograd_out__foreach_frac_out(self, out); |
12069 | } |
12070 | void _foreach_frac_outf(at::TensorList self, at::TensorList out) { |
12071 | return wrapper_CompositeExplicitAutograd_out__foreach_frac_out(self, out); |
12072 | } |
12073 | void _foreach_reciprocal_out(at::TensorList out, at::TensorList self) { |
12074 | return wrapper_CompositeExplicitAutograd_out__foreach_reciprocal_out(self, out); |
12075 | } |
12076 | void _foreach_reciprocal_outf(at::TensorList self, at::TensorList out) { |
12077 | return wrapper_CompositeExplicitAutograd_out__foreach_reciprocal_out(self, out); |
12078 | } |
12079 | void _foreach_sigmoid_out(at::TensorList out, at::TensorList self) { |
12080 | return wrapper_CompositeExplicitAutograd_out__foreach_sigmoid_out(self, out); |
12081 | } |
12082 | void _foreach_sigmoid_outf(at::TensorList self, at::TensorList out) { |
12083 | return wrapper_CompositeExplicitAutograd_out__foreach_sigmoid_out(self, out); |
12084 | } |
12085 | void _foreach_trunc_out(at::TensorList out, at::TensorList self) { |
12086 | return wrapper_CompositeExplicitAutograd_out__foreach_trunc_out(self, out); |
12087 | } |
12088 | void _foreach_trunc_outf(at::TensorList self, at::TensorList out) { |
12089 | return wrapper_CompositeExplicitAutograd_out__foreach_trunc_out(self, out); |
12090 | } |
// Forwarding wrappers for the pointwise `_foreach_addcdiv` / `_foreach_addcmul`
// out= ops. Each op has three overloads, distinguished by the scaling
// argument and dispatched to a differently-suffixed kernel:
//   Scalar      — one `value` applied to every list element
//   ScalarList  — `at::ArrayRef<at::Scalar>` of per-element scalars
//   Tensor      — scalars packed in a single `at::Tensor`
void _foreach_addcdiv_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_addcdiv_out(self, tensor1, tensor2, value, out);
}
void _foreach_addcdiv_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_addcdiv_out(self, tensor1, tensor2, value, out);
}
void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_addcmul_out(self, tensor1, tensor2, value, out);
}
void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_addcmul_out(self, tensor1, tensor2, value, out);
}
void _foreach_addcdiv_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_addcdiv_out(self, tensor1, tensor2, scalars, out);
}
void _foreach_addcdiv_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_addcdiv_out(self, tensor1, tensor2, scalars, out);
}
void _foreach_addcdiv_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
return wrapper_CompositeExplicitAutograd_Tensor_out__foreach_addcdiv_out(self, tensor1, tensor2, scalars, out);
}
void _foreach_addcdiv_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Tensor_out__foreach_addcdiv_out(self, tensor1, tensor2, scalars, out);
}
void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_addcmul_out(self, tensor1, tensor2, scalars, out);
}
void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_ScalarList_out__foreach_addcmul_out(self, tensor1, tensor2, scalars, out);
}
void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
return wrapper_CompositeExplicitAutograd_Tensor_out__foreach_addcmul_out(self, tensor1, tensor2, scalars, out);
}
void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Tensor_out__foreach_addcmul_out(self, tensor1, tensor2, scalars, out);
}
// Forwarding wrappers for `_foreach_norm` (Scalar `ord`) and the two
// `_foreach_lerp` overloads (per-element `weights` TensorList vs a single
// Scalar `weight`); each pair forwards to the matching suffixed kernel.
void _foreach_norm_out(at::TensorList out, at::TensorList self, const at::Scalar & ord) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_norm_out(self, ord, out);
}
void _foreach_norm_outf(at::TensorList self, const at::Scalar & ord, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_norm_out(self, ord, out);
}
void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_lerp_out(self, tensors1, weights, out);
}
void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_List_out__foreach_lerp_out(self, tensors1, weights, out);
}
void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_lerp_out(self, tensors1, weight, out);
}
void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Scalar_out__foreach_lerp_out(self, tensors1, weight, out);
}
// Forwarding wrappers for the Scalar-`self` overloads of `bucketize` and
// `searchsorted` (the Tensor-`self` overloads are registered elsewhere).
// `*_out` takes `out` first, `*_outf` takes `out` last; both return `out`.
at::Tensor & bucketize_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
return wrapper_CompositeExplicitAutograd_Scalar_out_bucketize_out(self, boundaries, out_int32, right, out);
}
at::Tensor & bucketize_outf(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_bucketize_out(self, boundaries, out_int32, right, out);
}
at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
return wrapper_CompositeExplicitAutograd_Scalar_out_searchsorted_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Scalar_out_searchsorted_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
12157 | at::Tensor smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) { |
12158 | return wrapper_CompositeExplicitAutograd__smooth_l1_loss_backward(grad_output, self, target, reduction, beta); |
12159 | } |
12160 | at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { |
12161 | return wrapper_CompositeExplicitAutograd__huber_loss_backward(grad_output, self, target, reduction, delta); |
12162 | } |
12163 | at::Tensor soft_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { |
12164 | return wrapper_CompositeExplicitAutograd__soft_margin_loss(self, target, reduction); |
12165 | } |
12166 | at::Tensor & soft_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { |
12167 | return wrapper_CompositeExplicitAutograd_out_soft_margin_loss_out(self, target, reduction, out); |
12168 | } |
12169 | at::Tensor & soft_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) { |
12170 | return wrapper_CompositeExplicitAutograd_out_soft_margin_loss_out(self, target, reduction, out); |
12171 | } |
12172 | at::Tensor soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { |
12173 | return wrapper_CompositeExplicitAutograd__soft_margin_loss_backward(grad_output, self, target, reduction); |
12174 | } |
12175 | at::Tensor & soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { |
12176 | return wrapper_CompositeExplicitAutograd_grad_input_soft_margin_loss_backward_out(grad_output, self, target, reduction, grad_input); |
12177 | } |
12178 | at::Tensor & soft_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) { |
12179 | return wrapper_CompositeExplicitAutograd_grad_input_soft_margin_loss_backward_out(grad_output, self, target, reduction, grad_input); |
12180 | } |
// Forwarding wrappers for activation JVP/backward ops: `glu_jvp`,
// `glu_backward_jvp`, `hardswish_backward`, and `rrelu_with_noise_backward`
// (functional plus out= forms).
at::Tensor & glu_jvp_out(at::Tensor & out, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
return wrapper_CompositeExplicitAutograd_out_glu_jvp_out(glu, x, dx, dim, out);
}
at::Tensor & glu_jvp_outf(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_glu_jvp_out(glu, x, dx, dim, out);
}
at::Tensor & glu_backward_jvp_out(at::Tensor & out, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
return wrapper_CompositeExplicitAutograd_out_glu_backward_jvp_out(grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
}
at::Tensor & glu_backward_jvp_outf(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_glu_backward_jvp_out(grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
}
at::Tensor & hardswish_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_hardswish_backward_out(grad_output, self, out);
}
at::Tensor & hardswish_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_hardswish_backward_out(grad_output, self, out);
}
at::Tensor rrelu_with_noise_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
return wrapper_CompositeExplicitAutograd__rrelu_with_noise_backward(grad_output, self, noise, lower, upper, training, self_is_result);
}
at::Tensor & rrelu_with_noise_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
return wrapper_CompositeExplicitAutograd_out_rrelu_with_noise_backward_out(grad_output, self, noise, lower, upper, training, self_is_result, out);
}
at::Tensor & rrelu_with_noise_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_rrelu_with_noise_backward_out(grad_output, self, noise, lower, upper, training, self_is_result, out);
}
// Forwarding wrappers for adaptive average-pool ops (mkldnn backward,
// `_adaptive_avg_pool2d`/`3d` forward and backward). The plain IntArrayRef
// overloads convert `output_size` to symbolic ints via
// c10::fromIntArrayRefSlow before calling the SymInt kernel; the `_symint_`
// variants pass the c10::SymIntArrayRef straight through.
at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_adaptive_avg_pool2d_backward_out(grad_output, self, out);
}
at::Tensor & mkldnn_adaptive_avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_mkldnn_adaptive_avg_pool2d_backward_out(grad_output, self, out);
}
at::Tensor & _adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & _adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_out(self, output_size, out);
}
at::Tensor & _adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_out(self, output_size, out);
}
at::Tensor & _adaptive_avg_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_backward_out(grad_output, self, out);
}
at::Tensor & _adaptive_avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool2d_backward_out(grad_output, self, out);
}
at::Tensor & _adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & _adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & _adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_out(self, output_size, out);
}
at::Tensor & _adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_out(self, output_size, out);
}
at::Tensor & _adaptive_avg_pool3d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_backward_out(grad_output, self, out);
}
at::Tensor & _adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__adaptive_avg_pool3d_backward_out(grad_output, self, out);
}
// Forwarding wrappers for slow-convolution ops: `_slow_conv2d_backward`
// (three out tensors selected by `output_mask`), `conv_depthwise3d`, and
// `slow_conv_dilated2d`/`3d`. As above, the IntArrayRef `padding` overloads
// convert via c10::fromIntArrayRefSlow; the `_symint_` variants forward the
// c10::SymIntArrayRef unchanged.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
return wrapper_CompositeExplicitAutograd_output_mask_out__slow_conv2d_backward_out(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_output_mask_out__slow_conv2d_backward_out(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
}
at::Tensor & conv_depthwise3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CompositeExplicitAutograd_out_conv_depthwise3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
}
at::Tensor & conv_depthwise3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_conv_depthwise3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
}
at::Tensor & conv_depthwise3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CompositeExplicitAutograd_out_conv_depthwise3d_out(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
at::Tensor & conv_depthwise3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_conv_depthwise3d_out(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CompositeExplicitAutograd_out_slow_conv_dilated2d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
}
at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_slow_conv_dilated2d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
}
at::Tensor & slow_conv_dilated2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CompositeExplicitAutograd_out_slow_conv_dilated2d_out(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
at::Tensor & slow_conv_dilated2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_slow_conv_dilated2d_out(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
at::Tensor & slow_conv_dilated3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CompositeExplicitAutograd_out_slow_conv_dilated3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
}
at::Tensor & slow_conv_dilated3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_slow_conv_dilated3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
}
at::Tensor & slow_conv_dilated3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CompositeExplicitAutograd_out_slow_conv_dilated3d_out(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
at::Tensor & slow_conv_dilated3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_slow_conv_dilated3d_out(self, weight, kernel_size, bias, stride, padding, dilation, out);
}
// Forwarding wrappers for `isinf` and the mixed Scalar/Tensor overloads of
// `special_xlog1py` and `special_zeta`: `self_scalar` kernels take a Scalar
// `self` with a Tensor `other`, `other_scalar` kernels the reverse.
at::Tensor isinf(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd__isinf(self);
}
at::Tensor & isinf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_isinf_out(self, out);
}
at::Tensor & isinf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_isinf_out(self, out);
}
at::Tensor special_xlog1py(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_self_scalar_special_xlog1py(self, other);
}
at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_self_scalar_out_special_xlog1py_out(self, other, out);
}
at::Tensor & special_xlog1py_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_self_scalar_out_special_xlog1py_out(self, other, out);
}
at::Tensor special_xlog1py(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_other_scalar_special_xlog1py(self, other);
}
at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_other_scalar_out_special_xlog1py_out(self, other, out);
}
at::Tensor & special_xlog1py_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_other_scalar_out_special_xlog1py_out(self, other, out);
}
at::Tensor special_zeta(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_self_scalar_special_zeta(self, other);
}
at::Tensor & special_zeta_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutograd_self_scalar_out_special_zeta_out(self, other, out);
}
at::Tensor & special_zeta_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_self_scalar_out_special_zeta_out(self, other, out);
}
at::Tensor special_zeta(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_other_scalar_special_zeta(self, other);
}
at::Tensor & special_zeta_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutograd_other_scalar_out_special_zeta_out(self, other, out);
}
at::Tensor & special_zeta_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_other_scalar_out_special_zeta_out(self, other, out);
}
// Forwarding wrappers for the factory ops `fft_fftfreq` / `fft_rfftfreq`.
// The at::TensorOptions overload unpacks `options` into the four optional
// dtype/layout/device/pin_memory arguments (converting the dtype TypeMeta
// via optTypeMetaToScalarType) before calling the same kernel as the
// explicit-optionals overload.
at::Tensor fft_fftfreq(int64_t n, double d, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__fft_fftfreq(n, d, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor fft_fftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__fft_fftfreq(n, d, dtype, layout, device, pin_memory);
}
at::Tensor & fft_fftfreq_out(at::Tensor & out, int64_t n, double d) {
return wrapper_CompositeExplicitAutograd_out_fft_fftfreq_out(n, d, out);
}
at::Tensor & fft_fftfreq_outf(int64_t n, double d, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_fft_fftfreq_out(n, d, out);
}
at::Tensor fft_rfftfreq(int64_t n, double d, at::TensorOptions options) {
return wrapper_CompositeExplicitAutograd__fft_rfftfreq(n, d, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor fft_rfftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd__fft_rfftfreq(n, d, dtype, layout, device, pin_memory);
}
at::Tensor & fft_rfftfreq_out(at::Tensor & out, int64_t n, double d) {
return wrapper_CompositeExplicitAutograd_out_fft_rfftfreq_out(n, d, out);
}
at::Tensor & fft_rfftfreq_outf(int64_t n, double d, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_fft_rfftfreq_out(n, d, out);
}
// aten::linalg_lstsq — forwards directly to the CompositeExplicitAutograd kernel;
// returns the kernel's (solution, residuals, rank, singular_values)-style 4-tuple as-is.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {
return wrapper_CompositeExplicitAutograd__linalg_lstsq(self, b, rcond, driver);
}
// aten::linalg_matrix_exp.out — `_out` (out-first) / `_outf` (out-last) shims for the same kernel.
at::Tensor & linalg_matrix_exp_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_linalg_matrix_exp_out(self, out);
}
at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_linalg_matrix_exp_out(self, out);
}
// aten::linalg_pinv.atol_rtol_tensor_out — Tensor-valued atol/rtol overload shims.
at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
return wrapper_CompositeExplicitAutograd_atol_rtol_tensor_out_linalg_pinv_out(self, atol, rtol, hermitian, out);
}
at::Tensor & linalg_pinv_outf(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_atol_rtol_tensor_out_linalg_pinv_out(self, atol, rtol, hermitian, out);
}
// aten::_test_optional_{intlist,filled_intlist,floatlist}.out — shims for internal
// testing operators exercising optional list arguments; each `_out`/`_outf` pair
// calls the same wrapper with `out` repositioned.
at::Tensor & _test_optional_intlist_out(at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) {
return wrapper_CompositeExplicitAutograd_out__test_optional_intlist_out(values, addends, out);
}
at::Tensor & _test_optional_intlist_outf(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__test_optional_intlist_out(values, addends, out);
}
at::Tensor & _test_optional_filled_intlist_out(at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) {
return wrapper_CompositeExplicitAutograd_out__test_optional_filled_intlist_out(values, addends, out);
}
at::Tensor & _test_optional_filled_intlist_outf(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__test_optional_filled_intlist_out(values, addends, out);
}
at::Tensor & _test_optional_floatlist_out(at::Tensor & out, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
return wrapper_CompositeExplicitAutograd_out__test_optional_floatlist_out(values, addends, out);
}
at::Tensor & _test_optional_floatlist_outf(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__test_optional_floatlist_out(values, addends, out);
}
// aten::_test_warn_in_autograd (+ .out) — internal testing operator shims.
at::Tensor _test_warn_in_autograd(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd___test_warn_in_autograd(self);
}
at::Tensor & _test_warn_in_autograd_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__test_warn_in_autograd_out(self, out);
}
at::Tensor & _test_warn_in_autograd_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__test_warn_in_autograd_out(self, out);
}
// aten::_test_autograd_multiple_dispatch.fullcoverage (+ .out) — internal testing
// operator shims; note the "fullcoverage" overload-name segment in the wrapper name.
at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_fullcoverage__test_autograd_multiple_dispatch(self);
}
at::Tensor & _test_autograd_multiple_dispatch_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_fullcoverage_out__test_autograd_multiple_dispatch_out(self, out);
}
at::Tensor & _test_autograd_multiple_dispatch_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_fullcoverage_out__test_autograd_multiple_dispatch_out(self, out);
}
// aten::_test_autograd_multiple_dispatch_view (+ _copy.out) — internal testing operator shims.
at::Tensor _test_autograd_multiple_dispatch_view(const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd___test_autograd_multiple_dispatch_view(self);
}
at::Tensor & _test_autograd_multiple_dispatch_view_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__test_autograd_multiple_dispatch_view_copy_out(self, out);
}
at::Tensor & _test_autograd_multiple_dispatch_view_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__test_autograd_multiple_dispatch_view_copy_out(self, out);
}
// aten::segment_reduce.out and aten::_segment_reduce_backward.out — shims that
// forward every argument unchanged; `_out`/`_outf` pairs share one wrapper each.
at::Tensor & segment_reduce_out(at::Tensor & out, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
return wrapper_CompositeExplicitAutograd_out_segment_reduce_out(data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
}
at::Tensor & segment_reduce_outf(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_segment_reduce_out(data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
}
at::Tensor & _segment_reduce_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {
return wrapper_CompositeExplicitAutograd_out__segment_reduce_backward_out(grad, output, data, reduce, lengths, offsets, axis, initial, out);
}
at::Tensor & _segment_reduce_backward_outf(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__segment_reduce_backward_out(grad, output, data, reduce, lengths, offsets, axis, initial, out);
}
// aten::_nested_tensor_from_tensor_list (+ .out) — shims forwarding the tensor
// list and the individual optional TensorOptions components unchanged.
at::Tensor _nested_tensor_from_tensor_list(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd___nested_tensor_from_tensor_list(list, dtype, layout, device, pin_memory);
}
at::Tensor & _nested_tensor_from_tensor_list_out(at::Tensor & out, at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutograd_out__nested_tensor_from_tensor_list_out(list, dtype, layout, device, pin_memory, out);
}
at::Tensor & _nested_tensor_from_tensor_list_outf(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__nested_tensor_from_tensor_list_out(list, dtype, layout, device, pin_memory, out);
}
// aten::_fw_primal_copy.out and aten::_make_dual_copy.out — forward-AD view-copy shims.
at::Tensor & _fw_primal_copy_out(at::Tensor & out, const at::Tensor & self, int64_t level) {
return wrapper_CompositeExplicitAutograd_out__fw_primal_copy_out(self, level, out);
}
at::Tensor & _fw_primal_copy_outf(const at::Tensor & self, int64_t level, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__fw_primal_copy_out(self, level, out);
}
at::Tensor & _make_dual_copy_out(at::Tensor & out, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
return wrapper_CompositeExplicitAutograd_out__make_dual_copy_out(primal, tangent, level, out);
}
at::Tensor & _make_dual_copy_outf(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__make_dual_copy_out(primal, tangent, level, out);
}
// aten::{view_as_real_copy, view_as_complex_copy, _conj_copy, _neg_view_copy}.out —
// unary view-copy shims; each `_out`/`_outf` pair calls the same wrapper.
at::Tensor & view_as_real_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_view_as_real_copy_out(self, out);
}
at::Tensor & view_as_real_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_view_as_real_copy_out(self, out);
}
at::Tensor & view_as_complex_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_view_as_complex_copy_out(self, out);
}
at::Tensor & view_as_complex_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_view_as_complex_copy_out(self, out);
}
at::Tensor & _conj_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__conj_copy_out(self, out);
}
at::Tensor & _conj_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__conj_copy_out(self, out);
}
at::Tensor & _neg_view_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__neg_view_copy_out(self, out);
}
at::Tensor & _neg_view_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__neg_view_copy_out(self, out);
}
// aten::as_strided_copy.out — the int64 overloads widen IntArrayRef sizes/strides
// to SymInt (via c10::fromIntArrayRefSlow) and wrap the optional storage_offset in
// a SymInt before calling the SymInt-native wrapper; the `symint` overloads pass through.
at::Tensor & as_strided_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) {
return wrapper_CompositeExplicitAutograd_out_as_strided_copy_out(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
}
at::Tensor & as_strided_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_as_strided_copy_out(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
}
at::Tensor & as_strided_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
return wrapper_CompositeExplicitAutograd_out_as_strided_copy_out(self, size, stride, storage_offset, out);
}
at::Tensor & as_strided_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_as_strided_copy_out(self, size, stride, storage_offset, out);
}
// aten::_sparse_broadcast_to_copy.out and aten::diagonal_copy.out — straight pass-through shims.
at::Tensor & _sparse_broadcast_to_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
return wrapper_CompositeExplicitAutograd_out__sparse_broadcast_to_copy_out(self, size, out);
}
at::Tensor & _sparse_broadcast_to_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__sparse_broadcast_to_copy_out(self, size, out);
}
at::Tensor & diagonal_copy_out(at::Tensor & out, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
return wrapper_CompositeExplicitAutograd_out_diagonal_copy_out(self, offset, dim1, dim2, out);
}
at::Tensor & diagonal_copy_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_diagonal_copy_out(self, offset, dim1, dim2, out);
}
// aten::expand_copy.out — int64 overloads widen `size` to SymInt via
// c10::fromIntArrayRefSlow; `symint` overloads pass through unchanged.
at::Tensor & expand_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, bool implicit) {
return wrapper_CompositeExplicitAutograd_out_expand_copy_out(self, c10::fromIntArrayRefSlow(size), implicit, out);
}
at::Tensor & expand_copy_outf(const at::Tensor & self, at::IntArrayRef size, bool implicit, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_expand_copy_out(self, c10::fromIntArrayRefSlow(size), implicit, out);
}
at::Tensor & expand_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
return wrapper_CompositeExplicitAutograd_out_expand_copy_out(self, size, implicit, out);
}
at::Tensor & expand_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_expand_copy_out(self, size, implicit, out);
}
// aten::permute_copy.out (plain pass-through) and aten::_reshape_alias_copy.out
// (int64 overloads widen size/stride to SymInt; symint overloads pass through).
at::Tensor & permute_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) {
return wrapper_CompositeExplicitAutograd_out_permute_copy_out(self, dims, out);
}
at::Tensor & permute_copy_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_permute_copy_out(self, dims, out);
}
at::Tensor & _reshape_alias_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_out__reshape_alias_copy_out(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
}
at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__reshape_alias_copy_out(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
}
at::Tensor & _reshape_alias_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
return wrapper_CompositeExplicitAutograd_out__reshape_alias_copy_out(self, size, stride, out);
}
at::Tensor & _reshape_alias_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__reshape_alias_copy_out(self, size, stride, out);
}
// aten::select_copy.int_out — the int64 `index` converts implicitly to SymInt
// at the wrapper call; symint overloads pass the SymInt through directly.
at::Tensor & select_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t index) {
return wrapper_CompositeExplicitAutograd_int_out_select_copy_out(self, dim, index, out);
}
at::Tensor & select_copy_outf(const at::Tensor & self, int64_t dim, int64_t index, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_int_out_select_copy_out(self, dim, index, out);
}
at::Tensor & select_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt index) {
return wrapper_CompositeExplicitAutograd_int_out_select_copy_out(self, dim, index, out);
}
at::Tensor & select_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_int_out_select_copy_out(self, dim, index, out);
}
// aten::detach_copy.out (pass-through) and aten::slice_copy.Tensor_out — the
// int64 overloads wrap optional start/end in SymInt before calling the
// SymInt-native wrapper; the `symint` overloads pass through.
at::Tensor & detach_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_detach_copy_out(self, out);
}
at::Tensor & detach_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_detach_copy_out(self, out);
}
at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) {
return wrapper_CompositeExplicitAutograd_Tensor_out_slice_copy_out(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
}
at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_out_slice_copy_out(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
}
at::Tensor & slice_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
return wrapper_CompositeExplicitAutograd_Tensor_out_slice_copy_out(self, dim, start, end, step, out);
}
at::Tensor & slice_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_Tensor_out_slice_copy_out(self, dim, start, end, step, out);
}
// aten::split_copy.Tensor_out and aten::split_with_sizes_copy.out — multi-output
// shims writing into a TensorList, hence `void` returns. Int64 overloads convert
// split sizes to SymInt (implicitly, or via c10::fromIntArrayRefSlow for lists).
void split_copy_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim) {
return wrapper_CompositeExplicitAutograd_Tensor_out_split_copy_out(self, split_size, dim, out);
}
void split_copy_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Tensor_out_split_copy_out(self, split_size, dim, out);
}
void split_copy_symint_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
return wrapper_CompositeExplicitAutograd_Tensor_out_split_copy_out(self, split_size, dim, out);
}
void split_copy_symint_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_Tensor_out_split_copy_out(self, split_size, dim, out);
}
void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim) {
return wrapper_CompositeExplicitAutograd_out_split_with_sizes_copy_out(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
}
void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out_split_with_sizes_copy_out(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
}
void split_with_sizes_copy_symint_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
return wrapper_CompositeExplicitAutograd_out_split_with_sizes_copy_out(self, split_sizes, dim, out);
}
void split_with_sizes_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out_split_with_sizes_copy_out(self, split_sizes, dim, out);
}
// aten::squeeze_copy.{out, dim_out, dims_out} — three overload families (no dim /
// single dim / list of dims), each routed to its own wrapper.
at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_squeeze_copy_out(self, out);
}
at::Tensor & squeeze_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_squeeze_copy_out(self, out);
}
at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutograd_dim_out_squeeze_copy_out(self, dim, out);
}
at::Tensor & squeeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_dim_out_squeeze_copy_out(self, dim, out);
}
at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) {
return wrapper_CompositeExplicitAutograd_dims_out_squeeze_copy_out(self, dim, out);
}
at::Tensor & squeeze_copy_outf(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_dims_out_squeeze_copy_out(self, dim, out);
}
// aten::{t_copy, transpose_copy.int, unsqueeze_copy}.out — pass-through shims.
at::Tensor & t_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_t_copy_out(self, out);
}
at::Tensor & t_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_t_copy_out(self, out);
}
at::Tensor & transpose_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
return wrapper_CompositeExplicitAutograd_int_out_transpose_copy_out(self, dim0, dim1, out);
}
at::Tensor & transpose_copy_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_int_out_transpose_copy_out(self, dim0, dim1, out);
}
at::Tensor & unsqueeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutograd_out_unsqueeze_copy_out(self, dim, out);
}
at::Tensor & unsqueeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_unsqueeze_copy_out(self, dim, out);
}
// aten::{_indices,_values,indices,values,crow_indices,col_indices,ccol_indices,
// row_indices}_copy.out — sparse-tensor accessor copy shims; all unary pass-throughs.
at::Tensor & _indices_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__indices_copy_out(self, out);
}
at::Tensor & _indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__indices_copy_out(self, out);
}
at::Tensor & _values_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out__values_copy_out(self, out);
}
at::Tensor & _values_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__values_copy_out(self, out);
}
at::Tensor & indices_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_indices_copy_out(self, out);
}
at::Tensor & indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_indices_copy_out(self, out);
}
at::Tensor & values_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_values_copy_out(self, out);
}
at::Tensor & values_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_values_copy_out(self, out);
}
at::Tensor & crow_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_crow_indices_copy_out(self, out);
}
at::Tensor & crow_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_crow_indices_copy_out(self, out);
}
at::Tensor & col_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_col_indices_copy_out(self, out);
}
at::Tensor & col_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_col_indices_copy_out(self, out);
}
at::Tensor & ccol_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_ccol_indices_copy_out(self, out);
}
at::Tensor & ccol_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_ccol_indices_copy_out(self, out);
}
at::Tensor & row_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_row_indices_copy_out(self, out);
}
at::Tensor & row_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_row_indices_copy_out(self, out);
}
// aten::unbind_copy.int_out (multi-output, void return) and aten::view_copy.{out,
// dtype_out} — the int64 size overloads widen to SymInt via c10::fromIntArrayRefSlow;
// the dtype overload reinterprets rather than reshapes, routed to a separate wrapper.
void unbind_copy_out(at::TensorList out, const at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutograd_int_out_unbind_copy_out(self, dim, out);
}
void unbind_copy_outf(const at::Tensor & self, int64_t dim, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_int_out_unbind_copy_out(self, dim, out);
}
at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
return wrapper_CompositeExplicitAutograd_out_view_copy_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & view_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_view_copy_out(self, c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & view_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
return wrapper_CompositeExplicitAutograd_out_view_copy_out(self, size, out);
}
at::Tensor & view_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_view_copy_out(self, size, out);
}
at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::ScalarType dtype) {
return wrapper_CompositeExplicitAutograd_dtype_out_view_copy_out(self, dtype, out);
}
at::Tensor & view_copy_outf(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_dtype_out_view_copy_out(self, dtype, out);
}
// aten::unfold_copy.out and aten::alias_copy.out — pass-through shims.
at::Tensor & unfold_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
return wrapper_CompositeExplicitAutograd_out_unfold_copy_out(self, dimension, size, step, out);
}
at::Tensor & unfold_copy_outf(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_unfold_copy_out(self, dimension, size, step, out);
}
at::Tensor & alias_copy_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeExplicitAutograd_out_alias_copy_out(self, out);
}
at::Tensor & alias_copy_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_alias_copy_out(self, out);
}
// aten::to_padded_tensor.out — int64 overloads widen the optional output_size
// to a SymInt list via c10::fromIntArrayRefSlow; symint overloads pass through.
at::Tensor & to_padded_tensor_out(at::Tensor & out, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
return wrapper_CompositeExplicitAutograd_out_to_padded_tensor_out(self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, out);
}
at::Tensor & to_padded_tensor_outf(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_to_padded_tensor_out(self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, out);
}
at::Tensor & to_padded_tensor_symint_out(at::Tensor & out, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {
return wrapper_CompositeExplicitAutograd_out_to_padded_tensor_out(self, padding, output_size, out);
}
at::Tensor & to_padded_tensor_symint_outf(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out_to_padded_tensor_out(self, padding, output_size, out);
}
// aten::_transformer_encoder_layer_fwd.out and aten::_native_multi_head_attention.out —
// wide-signature shims forwarding every argument unchanged; the multi-head-attention
// variant fills two outputs (out0, out1).
at::Tensor & _transformer_encoder_layer_fwd_out(at::Tensor & out, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {
return wrapper_CompositeExplicitAutograd_out__transformer_encoder_layer_fwd_out(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
}
at::Tensor & _transformer_encoder_layer_fwd_outf(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__transformer_encoder_layer_fwd_out(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
}
::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {
return wrapper_CompositeExplicitAutograd_out__native_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
}
::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
return wrapper_CompositeExplicitAutograd_out__native_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
}
// Out-variant entry points for _triton_scaled_dot_attention: `_out` takes the
// output tensor first, `_outf` takes it last; both forward to the same wrapper.
at::Tensor & _triton_scaled_dot_attention_out(at::Tensor & out, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
return wrapper_CompositeExplicitAutograd_out__triton_scaled_dot_attention_out(q, k, v, dropout_p, out);
}
at::Tensor & _triton_scaled_dot_attention_outf(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__triton_scaled_dot_attention_out(q, k, v, dropout_p, out);
}
// Out-variant entry points for _triton_multi_head_attention: `_out` takes the
// output tensor first, `_outf` takes it last; both forward to the same wrapper.
at::Tensor & _triton_multi_head_attention_out(at::Tensor & out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask) {
return wrapper_CompositeExplicitAutograd_out__triton_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
}
at::Tensor & _triton_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__triton_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
}
// Out-variant entry points for _transformer_decoder_only_layer_fwd. The `_out`
// overload takes the three output tensors first; `_outf` takes them last. Both
// forward unchanged to the same codegen'd wrapper.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value) {
return wrapper_CompositeExplicitAutograd_out__transformer_decoder_only_layer_fwd_out(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_outf(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
return wrapper_CompositeExplicitAutograd_out__transformer_decoder_only_layer_fwd_out(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2);
}
// Out-variant entry points for _native_decoder_only_multi_head_attention. The
// `_out` overload takes the four output tensors first; `_outf` takes them last.
// Both forward unchanged to the same codegen'd wrapper.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _native_decoder_only_multi_head_attention_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights) {
return wrapper_CompositeExplicitAutograd_out__native_decoder_only_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights, out0, out1, out2, out3);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _native_decoder_only_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
return wrapper_CompositeExplicitAutograd_out__native_decoder_only_multi_head_attention_out(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights, out0, out1, out2, out3);
}
// Out-variant entry points for the special-function polynomial ops — Chebyshev
// T/U/V/W, (physicists'/probabilists') Hermite H/He, Laguerre L, Legendre P, and
// shifted Chebyshev T/U/V/W — all via the `n_scalar_out` overload (degree `n` is
// an at::Scalar). For each op, `*_out` takes `out` first and `*_outf` takes it
// last; each pair forwards unchanged to the same codegen'd wrapper.
at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_w_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_chebyshev_polynomial_w_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_hermite_polynomial_h_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_hermite_polynomial_h_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_hermite_polynomial_he_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_hermite_polynomial_he_out(x, n, out);
}
at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_laguerre_polynomial_l_out(x, n, out);
}
at::Tensor & special_laguerre_polynomial_l_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_laguerre_polynomial_l_out(x, n, out);
}
at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_legendre_polynomial_p_out(x, n, out);
}
at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_legendre_polynomial_p_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_w_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_n_scalar_out_special_shifted_chebyshev_polynomial_w_out(x, n, out);
}
// Out-variant entry points for _foobar (an internal test op): `_out` takes `out`
// first, `_outf` takes it last; both forward to the same codegen'd wrapper.
at::Tensor & _foobar_out(at::Tensor & out, const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
return wrapper_CompositeExplicitAutograd_out__foobar_out(self, arg1, arg2, arg3, out);
}
at::Tensor & _foobar_outf(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
return wrapper_CompositeExplicitAutograd_out__foobar_out(self, arg1, arg2, arg3, out);
}
// Entry points for _fused_adam: the functional overload returns five tensor
// vectors; the `_out` overloads return void and write through `out` (TensorList
// first in `_out`, last in `_outf`). All forward unchanged to the codegen'd
// wrappers — the functional variant to the `__fused_adam` wrapper, both out
// variants to the `_out__fused_adam_out` wrapper.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
return wrapper_CompositeExplicitAutograd___fused_adam(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
return wrapper_CompositeExplicitAutograd_out__fused_adam_out(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}
void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__fused_adam_out(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}
// Entry points for _fused_adamw, mirroring the _fused_adam trio: a functional
// overload returning five tensor vectors, plus void `_out`/`_outf` overloads
// (output TensorList first vs. last). All forward unchanged to the codegen'd
// wrappers.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
return wrapper_CompositeExplicitAutograd___fused_adamw(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
void _fused_adamw_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
return wrapper_CompositeExplicitAutograd_out__fused_adamw_out(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}
void _fused_adamw_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
return wrapper_CompositeExplicitAutograd_out__fused_adamw_out(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}
12850 | } // namespace compositeexplicitautograd |
12851 | } // namespace at |
12852 | |