1 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
2 | // @generated by torchgen/gen.py from CompositeViewCopyKernels.cpp |
3 | |
4 | #include <ATen/InferSize.h> |
5 | #include <ATen/Tensor.h> |
6 | #include <ATen/native/Resize.h> |
7 | |
8 | #ifndef AT_PER_OPERATOR_HEADERS |
9 | #include <ATen/Operators.h> |
10 | #else |
11 | #include <ATen/ops/clone.h> |
12 | #include <ATen/ops/_fw_primal_ops.h> |
13 | #include <ATen/ops/_fw_primal_native.h> |
14 | #include <ATen/ops/_fw_primal_copy_ops.h> |
15 | #include <ATen/ops/_fw_primal_copy_native.h> |
16 | #include <ATen/ops/_make_dual_ops.h> |
17 | #include <ATen/ops/_make_dual_native.h> |
18 | #include <ATen/ops/_make_dual_copy_ops.h> |
19 | #include <ATen/ops/_make_dual_copy_native.h> |
20 | #include <ATen/ops/_unpack_dual_ops.h> |
21 | #include <ATen/ops/_unpack_dual_native.h> |
22 | #include <ATen/ops/rename_ops.h> |
23 | #include <ATen/ops/rename_native.h> |
24 | #include <ATen/ops/align_to_ops.h> |
25 | #include <ATen/ops/align_to_native.h> |
26 | #include <ATen/ops/align_to_ops.h> |
27 | #include <ATen/ops/align_to_native.h> |
28 | #include <ATen/ops/refine_names_ops.h> |
29 | #include <ATen/ops/refine_names_native.h> |
30 | #include <ATen/ops/view_as_real_ops.h> |
31 | #include <ATen/ops/view_as_real_native.h> |
32 | #include <ATen/ops/view_as_real_copy_ops.h> |
33 | #include <ATen/ops/view_as_real_copy_native.h> |
34 | #include <ATen/ops/view_as_complex_ops.h> |
35 | #include <ATen/ops/view_as_complex_native.h> |
36 | #include <ATen/ops/view_as_complex_copy_ops.h> |
37 | #include <ATen/ops/view_as_complex_copy_native.h> |
38 | #include <ATen/ops/real_ops.h> |
39 | #include <ATen/ops/real_native.h> |
40 | #include <ATen/ops/imag_ops.h> |
41 | #include <ATen/ops/imag_native.h> |
42 | #include <ATen/ops/_conj_ops.h> |
43 | #include <ATen/ops/_conj_native.h> |
44 | #include <ATen/ops/_conj_copy_ops.h> |
45 | #include <ATen/ops/_conj_copy_native.h> |
46 | #include <ATen/ops/conj_ops.h> |
47 | #include <ATen/ops/conj_native.h> |
48 | #include <ATen/ops/resolve_conj_ops.h> |
49 | #include <ATen/ops/resolve_conj_native.h> |
50 | #include <ATen/ops/resolve_neg_ops.h> |
51 | #include <ATen/ops/resolve_neg_native.h> |
52 | #include <ATen/ops/_neg_view_ops.h> |
53 | #include <ATen/ops/_neg_view_native.h> |
54 | #include <ATen/ops/_neg_view_copy_ops.h> |
55 | #include <ATen/ops/_neg_view_copy_native.h> |
56 | #include <ATen/ops/as_strided_ops.h> |
57 | #include <ATen/ops/as_strided_native.h> |
58 | #include <ATen/ops/as_strided_copy_ops.h> |
59 | #include <ATen/ops/as_strided_copy_native.h> |
60 | #include <ATen/ops/broadcast_to_ops.h> |
61 | #include <ATen/ops/broadcast_to_native.h> |
62 | #include <ATen/ops/_sparse_broadcast_to_ops.h> |
63 | #include <ATen/ops/_sparse_broadcast_to_native.h> |
64 | #include <ATen/ops/_sparse_broadcast_to_copy_ops.h> |
65 | #include <ATen/ops/_sparse_broadcast_to_copy_native.h> |
66 | #include <ATen/ops/chunk_ops.h> |
67 | #include <ATen/ops/chunk_native.h> |
68 | #include <ATen/ops/tensor_split_ops.h> |
69 | #include <ATen/ops/tensor_split_native.h> |
70 | #include <ATen/ops/tensor_split_ops.h> |
71 | #include <ATen/ops/tensor_split_native.h> |
72 | #include <ATen/ops/tensor_split_ops.h> |
73 | #include <ATen/ops/tensor_split_native.h> |
74 | #include <ATen/ops/contiguous_ops.h> |
75 | #include <ATen/ops/contiguous_native.h> |
76 | #include <ATen/ops/diagonal_ops.h> |
77 | #include <ATen/ops/diagonal_native.h> |
78 | #include <ATen/ops/diagonal_copy_ops.h> |
79 | #include <ATen/ops/diagonal_copy_native.h> |
80 | #include <ATen/ops/linalg_diagonal_ops.h> |
81 | #include <ATen/ops/linalg_diagonal_native.h> |
82 | #include <ATen/ops/diagonal_ops.h> |
83 | #include <ATen/ops/diagonal_native.h> |
84 | #include <ATen/ops/expand_ops.h> |
85 | #include <ATen/ops/expand_native.h> |
86 | #include <ATen/ops/expand_copy_ops.h> |
87 | #include <ATen/ops/expand_copy_native.h> |
88 | #include <ATen/ops/expand_as_ops.h> |
89 | #include <ATen/ops/expand_as_native.h> |
90 | #include <ATen/ops/flatten_ops.h> |
91 | #include <ATen/ops/flatten_native.h> |
92 | #include <ATen/ops/flatten_ops.h> |
93 | #include <ATen/ops/flatten_native.h> |
94 | #include <ATen/ops/flatten_ops.h> |
95 | #include <ATen/ops/flatten_native.h> |
96 | #include <ATen/ops/flatten_ops.h> |
97 | #include <ATen/ops/flatten_native.h> |
98 | #include <ATen/ops/unflatten_ops.h> |
99 | #include <ATen/ops/unflatten_native.h> |
100 | #include <ATen/ops/unflatten_ops.h> |
101 | #include <ATen/ops/unflatten_native.h> |
102 | #include <ATen/ops/narrow_ops.h> |
103 | #include <ATen/ops/narrow_native.h> |
104 | #include <ATen/ops/narrow_copy_ops.h> |
105 | #include <ATen/ops/narrow_copy_native.h> |
106 | #include <ATen/ops/narrow_ops.h> |
107 | #include <ATen/ops/narrow_native.h> |
108 | #include <ATen/ops/permute_ops.h> |
109 | #include <ATen/ops/permute_native.h> |
110 | #include <ATen/ops/permute_copy_ops.h> |
111 | #include <ATen/ops/permute_copy_native.h> |
112 | #include <ATen/ops/movedim_ops.h> |
113 | #include <ATen/ops/movedim_native.h> |
114 | #include <ATen/ops/movedim_ops.h> |
115 | #include <ATen/ops/movedim_native.h> |
116 | #include <ATen/ops/moveaxis_ops.h> |
117 | #include <ATen/ops/moveaxis_native.h> |
118 | #include <ATen/ops/moveaxis_ops.h> |
119 | #include <ATen/ops/moveaxis_native.h> |
120 | #include <ATen/ops/numpy_T_ops.h> |
121 | #include <ATen/ops/numpy_T_native.h> |
122 | #include <ATen/ops/matrix_H_ops.h> |
123 | #include <ATen/ops/matrix_H_native.h> |
124 | #include <ATen/ops/mT_ops.h> |
125 | #include <ATen/ops/mT_native.h> |
126 | #include <ATen/ops/mH_ops.h> |
127 | #include <ATen/ops/mH_native.h> |
128 | #include <ATen/ops/adjoint_ops.h> |
129 | #include <ATen/ops/adjoint_native.h> |
130 | #include <ATen/ops/pin_memory_ops.h> |
131 | #include <ATen/ops/pin_memory_native.h> |
132 | #include <ATen/ops/ravel_ops.h> |
133 | #include <ATen/ops/ravel_native.h> |
134 | #include <ATen/ops/reshape_ops.h> |
135 | #include <ATen/ops/reshape_native.h> |
136 | #include <ATen/ops/_reshape_alias_ops.h> |
137 | #include <ATen/ops/_reshape_alias_native.h> |
138 | #include <ATen/ops/_reshape_alias_copy_ops.h> |
139 | #include <ATen/ops/_reshape_alias_copy_native.h> |
140 | #include <ATen/ops/reshape_as_ops.h> |
141 | #include <ATen/ops/reshape_as_native.h> |
142 | #include <ATen/ops/select_ops.h> |
143 | #include <ATen/ops/select_native.h> |
144 | #include <ATen/ops/select_ops.h> |
145 | #include <ATen/ops/select_native.h> |
146 | #include <ATen/ops/select_copy_ops.h> |
147 | #include <ATen/ops/select_copy_native.h> |
148 | #include <ATen/ops/detach_ops.h> |
149 | #include <ATen/ops/detach_native.h> |
150 | #include <ATen/ops/detach_copy_ops.h> |
151 | #include <ATen/ops/detach_copy_native.h> |
152 | #include <ATen/ops/slice_ops.h> |
153 | #include <ATen/ops/slice_native.h> |
154 | #include <ATen/ops/slice_copy_ops.h> |
155 | #include <ATen/ops/slice_copy_native.h> |
156 | #include <ATen/ops/split_ops.h> |
157 | #include <ATen/ops/split_native.h> |
158 | #include <ATen/ops/split_copy_ops.h> |
159 | #include <ATen/ops/split_copy_native.h> |
160 | #include <ATen/ops/split_ops.h> |
161 | #include <ATen/ops/split_native.h> |
162 | #include <ATen/ops/split_with_sizes_ops.h> |
163 | #include <ATen/ops/split_with_sizes_native.h> |
164 | #include <ATen/ops/split_with_sizes_copy_ops.h> |
165 | #include <ATen/ops/split_with_sizes_copy_native.h> |
166 | #include <ATen/ops/hsplit_ops.h> |
167 | #include <ATen/ops/hsplit_native.h> |
168 | #include <ATen/ops/hsplit_ops.h> |
169 | #include <ATen/ops/hsplit_native.h> |
170 | #include <ATen/ops/vsplit_ops.h> |
171 | #include <ATen/ops/vsplit_native.h> |
172 | #include <ATen/ops/vsplit_ops.h> |
173 | #include <ATen/ops/vsplit_native.h> |
174 | #include <ATen/ops/dsplit_ops.h> |
175 | #include <ATen/ops/dsplit_native.h> |
176 | #include <ATen/ops/dsplit_ops.h> |
177 | #include <ATen/ops/dsplit_native.h> |
178 | #include <ATen/ops/squeeze_ops.h> |
179 | #include <ATen/ops/squeeze_native.h> |
180 | #include <ATen/ops/squeeze_copy_ops.h> |
181 | #include <ATen/ops/squeeze_copy_native.h> |
182 | #include <ATen/ops/squeeze_ops.h> |
183 | #include <ATen/ops/squeeze_native.h> |
184 | #include <ATen/ops/squeeze_copy_ops.h> |
185 | #include <ATen/ops/squeeze_copy_native.h> |
186 | #include <ATen/ops/squeeze_ops.h> |
187 | #include <ATen/ops/squeeze_native.h> |
188 | #include <ATen/ops/squeeze_ops.h> |
189 | #include <ATen/ops/squeeze_native.h> |
190 | #include <ATen/ops/squeeze_copy_ops.h> |
191 | #include <ATen/ops/squeeze_copy_native.h> |
192 | #include <ATen/ops/t_ops.h> |
193 | #include <ATen/ops/t_native.h> |
194 | #include <ATen/ops/t_copy_ops.h> |
195 | #include <ATen/ops/t_copy_native.h> |
196 | #include <ATen/ops/transpose_ops.h> |
197 | #include <ATen/ops/transpose_native.h> |
198 | #include <ATen/ops/transpose_copy_ops.h> |
199 | #include <ATen/ops/transpose_copy_native.h> |
200 | #include <ATen/ops/transpose_ops.h> |
201 | #include <ATen/ops/transpose_native.h> |
202 | #include <ATen/ops/_nested_view_from_buffer_ops.h> |
203 | #include <ATen/ops/_nested_view_from_buffer_native.h> |
204 | #include <ATen/ops/_nested_view_from_buffer_copy_ops.h> |
205 | #include <ATen/ops/_nested_view_from_buffer_copy_native.h> |
206 | #include <ATen/ops/unsqueeze_ops.h> |
207 | #include <ATen/ops/unsqueeze_native.h> |
208 | #include <ATen/ops/unsqueeze_copy_ops.h> |
209 | #include <ATen/ops/unsqueeze_copy_native.h> |
210 | #include <ATen/ops/view_as_ops.h> |
211 | #include <ATen/ops/view_as_native.h> |
212 | #include <ATen/ops/positive_ops.h> |
213 | #include <ATen/ops/positive_native.h> |
214 | #include <ATen/ops/coalesce_ops.h> |
215 | #include <ATen/ops/coalesce_native.h> |
216 | #include <ATen/ops/_indices_ops.h> |
217 | #include <ATen/ops/_indices_native.h> |
218 | #include <ATen/ops/_indices_copy_ops.h> |
219 | #include <ATen/ops/_indices_copy_native.h> |
220 | #include <ATen/ops/_values_ops.h> |
221 | #include <ATen/ops/_values_native.h> |
222 | #include <ATen/ops/_values_copy_ops.h> |
223 | #include <ATen/ops/_values_copy_native.h> |
224 | #include <ATen/ops/indices_ops.h> |
225 | #include <ATen/ops/indices_native.h> |
226 | #include <ATen/ops/indices_copy_ops.h> |
227 | #include <ATen/ops/indices_copy_native.h> |
228 | #include <ATen/ops/values_ops.h> |
229 | #include <ATen/ops/values_native.h> |
230 | #include <ATen/ops/values_copy_ops.h> |
231 | #include <ATen/ops/values_copy_native.h> |
232 | #include <ATen/ops/crow_indices_ops.h> |
233 | #include <ATen/ops/crow_indices_native.h> |
234 | #include <ATen/ops/crow_indices_copy_ops.h> |
235 | #include <ATen/ops/crow_indices_copy_native.h> |
236 | #include <ATen/ops/col_indices_ops.h> |
237 | #include <ATen/ops/col_indices_native.h> |
238 | #include <ATen/ops/col_indices_copy_ops.h> |
239 | #include <ATen/ops/col_indices_copy_native.h> |
240 | #include <ATen/ops/ccol_indices_ops.h> |
241 | #include <ATen/ops/ccol_indices_native.h> |
242 | #include <ATen/ops/ccol_indices_copy_ops.h> |
243 | #include <ATen/ops/ccol_indices_copy_native.h> |
244 | #include <ATen/ops/row_indices_ops.h> |
245 | #include <ATen/ops/row_indices_native.h> |
246 | #include <ATen/ops/row_indices_copy_ops.h> |
247 | #include <ATen/ops/row_indices_copy_native.h> |
248 | #include <ATen/ops/unbind_ops.h> |
249 | #include <ATen/ops/unbind_native.h> |
250 | #include <ATen/ops/unbind_copy_ops.h> |
251 | #include <ATen/ops/unbind_copy_native.h> |
252 | #include <ATen/ops/unbind_ops.h> |
253 | #include <ATen/ops/unbind_native.h> |
254 | #include <ATen/ops/_autocast_to_reduced_precision_ops.h> |
255 | #include <ATen/ops/_autocast_to_reduced_precision_native.h> |
256 | #include <ATen/ops/_autocast_to_full_precision_ops.h> |
257 | #include <ATen/ops/_autocast_to_full_precision_native.h> |
258 | #include <ATen/ops/to_ops.h> |
259 | #include <ATen/ops/to_native.h> |
260 | #include <ATen/ops/to_ops.h> |
261 | #include <ATen/ops/to_native.h> |
262 | #include <ATen/ops/to_ops.h> |
263 | #include <ATen/ops/to_native.h> |
264 | #include <ATen/ops/to_ops.h> |
265 | #include <ATen/ops/to_native.h> |
266 | #include <ATen/ops/lift_fresh_ops.h> |
267 | #include <ATen/ops/lift_fresh_native.h> |
268 | #include <ATen/ops/lift_fresh_copy_ops.h> |
269 | #include <ATen/ops/lift_fresh_copy_native.h> |
270 | #include <ATen/ops/view_ops.h> |
271 | #include <ATen/ops/view_native.h> |
272 | #include <ATen/ops/view_copy_ops.h> |
273 | #include <ATen/ops/view_copy_native.h> |
274 | #include <ATen/ops/view_ops.h> |
275 | #include <ATen/ops/view_native.h> |
276 | #include <ATen/ops/view_copy_ops.h> |
277 | #include <ATen/ops/view_copy_native.h> |
278 | #include <ATen/ops/swapaxes_ops.h> |
279 | #include <ATen/ops/swapaxes_native.h> |
280 | #include <ATen/ops/swapdims_ops.h> |
281 | #include <ATen/ops/swapdims_native.h> |
282 | #include <ATen/ops/unfold_ops.h> |
283 | #include <ATen/ops/unfold_native.h> |
284 | #include <ATen/ops/unfold_copy_ops.h> |
285 | #include <ATen/ops/unfold_copy_native.h> |
286 | #include <ATen/ops/alias_ops.h> |
287 | #include <ATen/ops/alias_native.h> |
288 | #include <ATen/ops/alias_copy_ops.h> |
289 | #include <ATen/ops/alias_copy_native.h> |
290 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h> |
291 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_native.h> |
292 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h> |
293 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h> |
294 | #include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h> |
295 | #include <ATen/ops/_cudnn_ctc_loss_ops.h> |
296 | #include <ATen/ops/_cudnn_rnn_flatten_weight_ops.h> |
297 | #include <ATen/ops/_cudnn_rnn_ops.h> |
298 | #include <ATen/ops/_cudnn_rnn_backward_ops.h> |
299 | #include <ATen/ops/_cudnn_init_dropout_state_ops.h> |
300 | #include <ATen/ops/_fused_dropout_ops.h> |
301 | #include <ATen/ops/_masked_scale_ops.h> |
302 | #include <ATen/ops/native_dropout_ops.h> |
303 | #include <ATen/ops/native_dropout_backward_ops.h> |
304 | #include <ATen/ops/abs_ops.h> |
305 | #include <ATen/ops/abs_ops.h> |
306 | #include <ATen/ops/absolute_ops.h> |
307 | #include <ATen/ops/absolute_ops.h> |
308 | #include <ATen/ops/angle_ops.h> |
309 | #include <ATen/ops/sgn_ops.h> |
310 | #include <ATen/ops/sgn_ops.h> |
311 | #include <ATen/ops/_conj_physical_ops.h> |
312 | #include <ATen/ops/conj_physical_ops.h> |
313 | #include <ATen/ops/conj_physical_ops.h> |
314 | #include <ATen/ops/acos_ops.h> |
315 | #include <ATen/ops/acos_ops.h> |
316 | #include <ATen/ops/arccos_ops.h> |
317 | #include <ATen/ops/arccos_ops.h> |
318 | #include <ATen/ops/add_ops.h> |
319 | #include <ATen/ops/add_ops.h> |
320 | #include <ATen/ops/_add_relu_ops.h> |
321 | #include <ATen/ops/_add_relu_ops.h> |
322 | #include <ATen/ops/_add_relu_ops.h> |
323 | #include <ATen/ops/_add_relu_ops.h> |
324 | #include <ATen/ops/add_ops.h> |
325 | #include <ATen/ops/add_ops.h> |
326 | #include <ATen/ops/addmv_ops.h> |
327 | #include <ATen/ops/addmv_ops.h> |
328 | #include <ATen/ops/addr_ops.h> |
329 | #include <ATen/ops/addr_ops.h> |
330 | #include <ATen/ops/affine_grid_generator_ops.h> |
331 | #include <ATen/ops/all_ops.h> |
332 | #include <ATen/ops/all_ops.h> |
333 | #include <ATen/ops/any_ops.h> |
334 | #include <ATen/ops/any_ops.h> |
335 | #include <ATen/ops/arange_ops.h> |
336 | #include <ATen/ops/arange_ops.h> |
337 | #include <ATen/ops/argmax_ops.h> |
338 | #include <ATen/ops/argmin_ops.h> |
339 | #include <ATen/ops/acosh_ops.h> |
340 | #include <ATen/ops/acosh_ops.h> |
341 | #include <ATen/ops/arccosh_ops.h> |
342 | #include <ATen/ops/arccosh_ops.h> |
343 | #include <ATen/ops/asinh_ops.h> |
344 | #include <ATen/ops/asinh_ops.h> |
345 | #include <ATen/ops/arcsinh_ops.h> |
346 | #include <ATen/ops/arcsinh_ops.h> |
347 | #include <ATen/ops/atanh_ops.h> |
348 | #include <ATen/ops/atanh_ops.h> |
349 | #include <ATen/ops/arctanh_ops.h> |
350 | #include <ATen/ops/arctanh_ops.h> |
351 | #include <ATen/ops/asin_ops.h> |
352 | #include <ATen/ops/asin_ops.h> |
353 | #include <ATen/ops/arcsin_ops.h> |
354 | #include <ATen/ops/arcsin_ops.h> |
355 | #include <ATen/ops/atan_ops.h> |
356 | #include <ATen/ops/atan_ops.h> |
357 | #include <ATen/ops/arctan_ops.h> |
358 | #include <ATen/ops/arctan_ops.h> |
359 | #include <ATen/ops/baddbmm_ops.h> |
360 | #include <ATen/ops/baddbmm_ops.h> |
361 | #include <ATen/ops/bartlett_window_ops.h> |
362 | #include <ATen/ops/bartlett_window_ops.h> |
363 | #include <ATen/ops/quantized_batch_norm_ops.h> |
364 | #include <ATen/ops/bernoulli_ops.h> |
365 | #include <ATen/ops/bernoulli_ops.h> |
366 | #include <ATen/ops/bernoulli_ops.h> |
367 | #include <ATen/ops/bernoulli_ops.h> |
368 | #include <ATen/ops/binary_cross_entropy_ops.h> |
369 | #include <ATen/ops/binary_cross_entropy_backward_ops.h> |
370 | #include <ATen/ops/binary_cross_entropy_with_logits_ops.h> |
371 | #include <ATen/ops/bincount_ops.h> |
372 | #include <ATen/ops/bitwise_not_ops.h> |
373 | #include <ATen/ops/bitwise_not_ops.h> |
374 | #include <ATen/ops/copysign_ops.h> |
375 | #include <ATen/ops/copysign_ops.h> |
376 | #include <ATen/ops/copysign_ops.h> |
377 | #include <ATen/ops/copysign_ops.h> |
378 | #include <ATen/ops/logical_not_ops.h> |
379 | #include <ATen/ops/logical_not_ops.h> |
380 | #include <ATen/ops/logical_xor_ops.h> |
381 | #include <ATen/ops/logical_xor_ops.h> |
382 | #include <ATen/ops/logical_and_ops.h> |
383 | #include <ATen/ops/logical_and_ops.h> |
384 | #include <ATen/ops/logical_or_ops.h> |
385 | #include <ATen/ops/logical_or_ops.h> |
386 | #include <ATen/ops/blackman_window_ops.h> |
387 | #include <ATen/ops/blackman_window_ops.h> |
388 | #include <ATen/ops/bmm_ops.h> |
389 | #include <ATen/ops/cat_ops.h> |
390 | #include <ATen/ops/cat_ops.h> |
391 | #include <ATen/ops/concat_ops.h> |
392 | #include <ATen/ops/concat_ops.h> |
393 | #include <ATen/ops/concatenate_ops.h> |
394 | #include <ATen/ops/concatenate_ops.h> |
395 | #include <ATen/ops/block_diag_ops.h> |
396 | #include <ATen/ops/ceil_ops.h> |
397 | #include <ATen/ops/ceil_ops.h> |
398 | #include <ATen/ops/chain_matmul_ops.h> |
399 | #include <ATen/ops/clamp_ops.h> |
400 | #include <ATen/ops/clamp_ops.h> |
401 | #include <ATen/ops/clamp_ops.h> |
402 | #include <ATen/ops/clamp_ops.h> |
403 | #include <ATen/ops/clamp_max_ops.h> |
404 | #include <ATen/ops/clamp_max_ops.h> |
405 | #include <ATen/ops/clamp_max_ops.h> |
406 | #include <ATen/ops/clamp_max_ops.h> |
407 | #include <ATen/ops/clamp_min_ops.h> |
408 | #include <ATen/ops/clamp_min_ops.h> |
409 | #include <ATen/ops/clamp_min_ops.h> |
410 | #include <ATen/ops/clamp_min_ops.h> |
411 | #include <ATen/ops/clip_ops.h> |
412 | #include <ATen/ops/clip_ops.h> |
413 | #include <ATen/ops/clip_ops.h> |
414 | #include <ATen/ops/clip_ops.h> |
415 | #include <ATen/ops/complex_ops.h> |
416 | #include <ATen/ops/polar_ops.h> |
417 | #include <ATen/ops/constant_pad_nd_ops.h> |
418 | #include <ATen/ops/convolution_ops.h> |
419 | #include <ATen/ops/convolution_backward_ops.h> |
420 | #include <ATen/ops/convolution_overrideable_ops.h> |
421 | #include <ATen/ops/convolution_backward_overrideable_ops.h> |
422 | #include <ATen/ops/_convolution_ops.h> |
423 | #include <ATen/ops/conv_tbc_ops.h> |
424 | #include <ATen/ops/copy_ops.h> |
425 | #include <ATen/ops/copy_ops.h> |
426 | #include <ATen/ops/_copy_from_ops.h> |
427 | #include <ATen/ops/_copy_from_and_resize_ops.h> |
428 | #include <ATen/ops/cos_ops.h> |
429 | #include <ATen/ops/cos_ops.h> |
430 | #include <ATen/ops/cosh_ops.h> |
431 | #include <ATen/ops/cosh_ops.h> |
432 | #include <ATen/ops/count_nonzero_ops.h> |
433 | #include <ATen/ops/count_nonzero_ops.h> |
434 | #include <ATen/ops/cudnn_affine_grid_generator_ops.h> |
435 | #include <ATen/ops/cudnn_affine_grid_generator_backward_ops.h> |
436 | #include <ATen/ops/cudnn_batch_norm_ops.h> |
437 | #include <ATen/ops/cudnn_batch_norm_backward_ops.h> |
438 | #include <ATen/ops/cudnn_convolution_ops.h> |
439 | #include <ATen/ops/cudnn_convolution_transpose_ops.h> |
440 | #include <ATen/ops/_mps_convolution_transpose_ops.h> |
441 | #include <ATen/ops/mps_convolution_transpose_backward_ops.h> |
442 | #include <ATen/ops/cudnn_convolution_relu_ops.h> |
443 | #include <ATen/ops/cudnn_convolution_add_relu_ops.h> |
444 | #include <ATen/ops/cudnn_grid_sampler_ops.h> |
445 | #include <ATen/ops/cudnn_grid_sampler_backward_ops.h> |
446 | #include <ATen/ops/cummax_ops.h> |
447 | #include <ATen/ops/cummax_ops.h> |
448 | #include <ATen/ops/cummin_ops.h> |
449 | #include <ATen/ops/cummin_ops.h> |
450 | #include <ATen/ops/cumprod_ops.h> |
451 | #include <ATen/ops/cumprod_ops.h> |
452 | #include <ATen/ops/cumprod_ops.h> |
453 | #include <ATen/ops/cumprod_ops.h> |
454 | #include <ATen/ops/cumsum_ops.h> |
455 | #include <ATen/ops/cumsum_ops.h> |
456 | #include <ATen/ops/cumsum_ops.h> |
457 | #include <ATen/ops/cumsum_ops.h> |
458 | #include <ATen/ops/_ctc_loss_ops.h> |
459 | #include <ATen/ops/_ctc_loss_ops.h> |
460 | #include <ATen/ops/_ctc_loss_backward_ops.h> |
461 | #include <ATen/ops/diag_embed_ops.h> |
462 | #include <ATen/ops/diagonal_backward_ops.h> |
463 | #include <ATen/ops/diff_ops.h> |
464 | #include <ATen/ops/div_ops.h> |
465 | #include <ATen/ops/div_ops.h> |
466 | #include <ATen/ops/div_ops.h> |
467 | #include <ATen/ops/div_ops.h> |
468 | #include <ATen/ops/div_ops.h> |
469 | #include <ATen/ops/div_ops.h> |
470 | #include <ATen/ops/div_ops.h> |
471 | #include <ATen/ops/div_ops.h> |
472 | #include <ATen/ops/divide_ops.h> |
473 | #include <ATen/ops/divide_ops.h> |
474 | #include <ATen/ops/divide_ops.h> |
475 | #include <ATen/ops/divide_ops.h> |
476 | #include <ATen/ops/true_divide_ops.h> |
477 | #include <ATen/ops/true_divide_ops.h> |
478 | #include <ATen/ops/dot_ops.h> |
479 | #include <ATen/ops/vdot_ops.h> |
480 | #include <ATen/ops/embedding_ops.h> |
481 | #include <ATen/ops/embedding_dense_backward_ops.h> |
482 | #include <ATen/ops/embedding_renorm_ops.h> |
483 | #include <ATen/ops/_embedding_bag_forward_only_ops.h> |
484 | #include <ATen/ops/row_stack_ops.h> |
485 | #include <ATen/ops/_embedding_bag_ops.h> |
486 | #include <ATen/ops/_embedding_bag_dense_backward_ops.h> |
487 | #include <ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h> |
488 | #include <ATen/ops/empty_ops.h> |
489 | #include <ATen/ops/empty_ops.h> |
490 | #include <ATen/ops/new_empty_ops.h> |
491 | #include <ATen/ops/new_empty_strided_ops.h> |
492 | #include <ATen/ops/new_full_ops.h> |
493 | #include <ATen/ops/new_zeros_ops.h> |
494 | #include <ATen/ops/new_ones_ops.h> |
495 | #include <ATen/ops/_empty_affine_quantized_ops.h> |
496 | #include <ATen/ops/_empty_per_channel_affine_quantized_ops.h> |
497 | #include <ATen/ops/resize_ops.h> |
498 | #include <ATen/ops/_resize_output_ops.h> |
499 | #include <ATen/ops/empty_quantized_ops.h> |
500 | #include <ATen/ops/empty_like_ops.h> |
501 | #include <ATen/ops/empty_strided_ops.h> |
502 | #include <ATen/ops/erf_ops.h> |
503 | #include <ATen/ops/erf_ops.h> |
504 | #include <ATen/ops/erfc_ops.h> |
505 | #include <ATen/ops/erfc_ops.h> |
506 | #include <ATen/ops/exp_ops.h> |
507 | #include <ATen/ops/exp_ops.h> |
508 | #include <ATen/ops/exp2_ops.h> |
509 | #include <ATen/ops/exp2_ops.h> |
510 | #include <ATen/ops/expm1_ops.h> |
511 | #include <ATen/ops/expm1_ops.h> |
512 | #include <ATen/ops/eye_ops.h> |
513 | #include <ATen/ops/eye_ops.h> |
514 | #include <ATen/ops/fill_ops.h> |
515 | #include <ATen/ops/fill_ops.h> |
516 | #include <ATen/ops/fill_ops.h> |
517 | #include <ATen/ops/fill_ops.h> |
518 | #include <ATen/ops/floor_ops.h> |
519 | #include <ATen/ops/floor_ops.h> |
520 | #include <ATen/ops/floor_divide_ops.h> |
521 | #include <ATen/ops/floor_divide_ops.h> |
522 | #include <ATen/ops/frac_ops.h> |
523 | #include <ATen/ops/frac_ops.h> |
524 | #include <ATen/ops/full_ops.h> |
525 | #include <ATen/ops/full_ops.h> |
526 | #include <ATen/ops/full_like_ops.h> |
527 | #include <ATen/ops/from_file_ops.h> |
528 | #include <ATen/ops/gcd_ops.h> |
529 | #include <ATen/ops/gcd_ops.h> |
530 | #include <ATen/ops/lcm_ops.h> |
531 | #include <ATen/ops/lcm_ops.h> |
532 | #include <ATen/ops/grid_sampler_2d_ops.h> |
533 | #include <ATen/ops/grid_sampler_2d_backward_ops.h> |
534 | #include <ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h> |
535 | #include <ATen/ops/grid_sampler_3d_ops.h> |
536 | #include <ATen/ops/grid_sampler_3d_backward_ops.h> |
537 | #include <ATen/ops/hann_window_ops.h> |
538 | #include <ATen/ops/hann_window_ops.h> |
539 | #include <ATen/ops/hamming_window_ops.h> |
540 | #include <ATen/ops/hamming_window_ops.h> |
541 | #include <ATen/ops/hamming_window_ops.h> |
542 | #include <ATen/ops/hamming_window_ops.h> |
543 | #include <ATen/ops/kaiser_window_ops.h> |
544 | #include <ATen/ops/kaiser_window_ops.h> |
545 | #include <ATen/ops/kaiser_window_ops.h> |
546 | #include <ATen/ops/native_group_norm_ops.h> |
547 | #include <ATen/ops/native_group_norm_backward_ops.h> |
548 | #include <ATen/ops/_fft_r2c_ops.h> |
549 | #include <ATen/ops/_fft_c2r_ops.h> |
550 | #include <ATen/ops/_fft_c2c_ops.h> |
551 | #include <ATen/ops/index_ops.h> |
552 | #include <ATen/ops/index_copy_ops.h> |
553 | #include <ATen/ops/index_copy_ops.h> |
554 | #include <ATen/ops/index_put_ops.h> |
555 | #include <ATen/ops/index_put_ops.h> |
556 | #include <ATen/ops/_index_put_impl_ops.h> |
557 | #include <ATen/ops/isin_ops.h> |
558 | #include <ATen/ops/isin_ops.h> |
559 | #include <ATen/ops/isin_ops.h> |
560 | #include <ATen/ops/isnan_ops.h> |
561 | #include <ATen/ops/kron_ops.h> |
562 | #include <ATen/ops/kthvalue_ops.h> |
563 | #include <ATen/ops/kthvalue_ops.h> |
564 | #include <ATen/ops/native_layer_norm_ops.h> |
565 | #include <ATen/ops/native_layer_norm_backward_ops.h> |
566 | #include <ATen/ops/nan_to_num_ops.h> |
567 | #include <ATen/ops/nan_to_num_ops.h> |
568 | #include <ATen/ops/linear_ops.h> |
569 | #include <ATen/ops/linear_backward_ops.h> |
570 | #include <ATen/ops/mkldnn_linear_ops.h> |
571 | #include <ATen/ops/mkldnn_linear_backward_input_ops.h> |
572 | #include <ATen/ops/mkldnn_linear_backward_weights_ops.h> |
573 | #include <ATen/ops/mkldnn_linear_backward_ops.h> |
574 | #include <ATen/ops/ldexp_ops.h> |
575 | #include <ATen/ops/ldexp_ops.h> |
576 | #include <ATen/ops/linspace_ops.h> |
577 | #include <ATen/ops/log_ops.h> |
578 | #include <ATen/ops/log_ops.h> |
579 | #include <ATen/ops/log10_ops.h> |
580 | #include <ATen/ops/log10_ops.h> |
581 | #include <ATen/ops/log1p_ops.h> |
582 | #include <ATen/ops/log1p_ops.h> |
583 | #include <ATen/ops/log2_ops.h> |
584 | #include <ATen/ops/log2_ops.h> |
585 | #include <ATen/ops/logaddexp_ops.h> |
586 | #include <ATen/ops/logaddexp2_ops.h> |
587 | #include <ATen/ops/xlogy_ops.h> |
588 | #include <ATen/ops/xlogy_ops.h> |
589 | #include <ATen/ops/xlogy_ops.h> |
590 | #include <ATen/ops/xlogy_ops.h> |
591 | #include <ATen/ops/xlogy_ops.h> |
592 | #include <ATen/ops/logspace_ops.h> |
593 | #include <ATen/ops/log_softmax_ops.h> |
594 | #include <ATen/ops/_log_softmax_ops.h> |
595 | #include <ATen/ops/_log_softmax_backward_data_ops.h> |
596 | #include <ATen/ops/_logcumsumexp_ops.h> |
597 | #include <ATen/ops/logcumsumexp_ops.h> |
598 | #include <ATen/ops/logcumsumexp_ops.h> |
599 | #include <ATen/ops/logsumexp_ops.h> |
600 | #include <ATen/ops/logsumexp_ops.h> |
601 | #include <ATen/ops/matmul_ops.h> |
602 | #include <ATen/ops/matmul_backward_ops.h> |
603 | #include <ATen/ops/matrix_power_ops.h> |
604 | #include <ATen/ops/_aminmax_ops.h> |
605 | #include <ATen/ops/_aminmax_ops.h> |
606 | #include <ATen/ops/aminmax_ops.h> |
607 | #include <ATen/ops/_compute_linear_combination_ops.h> |
608 | #include <ATen/ops/max_ops.h> |
609 | #include <ATen/ops/max_ops.h> |
610 | #include <ATen/ops/amax_ops.h> |
611 | #include <ATen/ops/_mps_max_pool2d_ops.h> |
612 | #include <ATen/ops/mps_max_pool2d_backward_ops.h> |
613 | #include <ATen/ops/mkldnn_max_pool2d_ops.h> |
614 | #include <ATen/ops/mkldnn_max_pool2d_backward_ops.h> |
615 | #include <ATen/ops/mkldnn_max_pool3d_ops.h> |
616 | #include <ATen/ops/mkldnn_max_pool3d_backward_ops.h> |
617 | #include <ATen/ops/quantized_max_pool1d_ops.h> |
618 | #include <ATen/ops/quantized_max_pool2d_ops.h> |
619 | #include <ATen/ops/mean_ops.h> |
620 | #include <ATen/ops/mean_ops.h> |
621 | #include <ATen/ops/nanmean_ops.h> |
622 | #include <ATen/ops/median_ops.h> |
623 | #include <ATen/ops/median_ops.h> |
624 | #include <ATen/ops/median_ops.h> |
625 | #include <ATen/ops/nanmedian_ops.h> |
626 | #include <ATen/ops/nanmedian_ops.h> |
627 | #include <ATen/ops/nanmedian_ops.h> |
628 | #include <ATen/ops/min_ops.h> |
629 | #include <ATen/ops/min_ops.h> |
630 | #include <ATen/ops/amin_ops.h> |
631 | #include <ATen/ops/_mps_convolution_ops.h> |
632 | #include <ATen/ops/mps_convolution_backward_ops.h> |
633 | #include <ATen/ops/mkldnn_convolution_ops.h> |
634 | #include <ATen/ops/mkldnn_rnn_layer_ops.h> |
635 | #include <ATen/ops/mkldnn_rnn_layer_backward_ops.h> |
636 | #include <ATen/ops/miopen_batch_norm_ops.h> |
637 | #include <ATen/ops/miopen_batch_norm_backward_ops.h> |
638 | #include <ATen/ops/miopen_convolution_ops.h> |
639 | #include <ATen/ops/miopen_convolution_transpose_ops.h> |
640 | #include <ATen/ops/miopen_depthwise_convolution_ops.h> |
641 | #include <ATen/ops/miopen_rnn_ops.h> |
642 | #include <ATen/ops/miopen_rnn_backward_ops.h> |
643 | #include <ATen/ops/mm_ops.h> |
644 | #include <ATen/ops/_sparse_sparse_matmul_ops.h> |
645 | #include <ATen/ops/mode_ops.h> |
646 | #include <ATen/ops/mode_ops.h> |
647 | #include <ATen/ops/mul_ops.h> |
648 | #include <ATen/ops/mul_ops.h> |
649 | #include <ATen/ops/mul_ops.h> |
650 | #include <ATen/ops/mul_ops.h> |
651 | #include <ATen/ops/multiply_ops.h> |
652 | #include <ATen/ops/multiply_ops.h> |
653 | #include <ATen/ops/mv_ops.h> |
654 | #include <ATen/ops/mvlgamma_ops.h> |
655 | #include <ATen/ops/mvlgamma_ops.h> |
656 | #include <ATen/ops/narrow_copy_ops.h> |
657 | #include <ATen/ops/native_batch_norm_ops.h> |
658 | #include <ATen/ops/_native_batch_norm_legit_ops.h> |
659 | #include <ATen/ops/_native_batch_norm_legit_ops.h> |
660 | #include <ATen/ops/batch_norm_stats_ops.h> |
661 | #include <ATen/ops/batch_norm_elemt_ops.h> |
662 | #include <ATen/ops/batch_norm_gather_stats_ops.h> |
663 | #include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h> |
664 | #include <ATen/ops/native_batch_norm_backward_ops.h> |
665 | #include <ATen/ops/batch_norm_backward_reduce_ops.h> |
666 | #include <ATen/ops/batch_norm_backward_elemt_ops.h> |
667 | #include <ATen/ops/batch_norm_update_stats_ops.h> |
668 | #include <ATen/ops/_nnpack_spatial_convolution_ops.h> |
669 | #include <ATen/ops/ones_ops.h> |
670 | #include <ATen/ops/ones_ops.h> |
671 | #include <ATen/ops/ones_like_ops.h> |
672 | #include <ATen/ops/_euclidean_dist_ops.h> |
673 | #include <ATen/ops/_cdist_forward_ops.h> |
674 | #include <ATen/ops/_cdist_backward_ops.h> |
675 | #include <ATen/ops/_pdist_forward_ops.h> |
676 | #include <ATen/ops/_pdist_backward_ops.h> |
677 | #include <ATen/ops/pixel_shuffle_ops.h> |
678 | #include <ATen/ops/pixel_unshuffle_ops.h> |
679 | #include <ATen/ops/channel_shuffle_ops.h> |
680 | #include <ATen/ops/_pin_memory_ops.h> |
681 | #include <ATen/ops/rad2deg_ops.h> |
682 | #include <ATen/ops/rad2deg_ops.h> |
683 | #include <ATen/ops/deg2rad_ops.h> |
684 | #include <ATen/ops/deg2rad_ops.h> |
685 | #include <ATen/ops/scalar_tensor_ops.h> |
686 | #include <ATen/ops/rand_ops.h> |
687 | #include <ATen/ops/rand_ops.h> |
688 | #include <ATen/ops/rand_ops.h> |
689 | #include <ATen/ops/rand_ops.h> |
690 | #include <ATen/ops/rand_like_ops.h> |
691 | #include <ATen/ops/randint_ops.h> |
692 | #include <ATen/ops/randint_ops.h> |
693 | #include <ATen/ops/randint_ops.h> |
694 | #include <ATen/ops/randint_ops.h> |
695 | #include <ATen/ops/randint_like_ops.h> |
696 | #include <ATen/ops/randint_like_ops.h> |
697 | #include <ATen/ops/randn_ops.h> |
698 | #include <ATen/ops/randn_ops.h> |
699 | #include <ATen/ops/randn_ops.h> |
700 | #include <ATen/ops/randn_ops.h> |
701 | #include <ATen/ops/randn_like_ops.h> |
702 | #include <ATen/ops/randperm_ops.h> |
703 | #include <ATen/ops/randperm_ops.h> |
704 | #include <ATen/ops/range_ops.h> |
705 | #include <ATen/ops/range_ops.h> |
706 | #include <ATen/ops/reciprocal_ops.h> |
707 | #include <ATen/ops/reciprocal_ops.h> |
708 | #include <ATen/ops/neg_ops.h> |
709 | #include <ATen/ops/neg_ops.h> |
710 | #include <ATen/ops/negative_ops.h> |
711 | #include <ATen/ops/negative_ops.h> |
712 | #include <ATen/ops/repeat_ops.h> |
713 | #include <ATen/ops/repeat_interleave_ops.h> |
714 | #include <ATen/ops/_mkldnn_reshape_ops.h> |
715 | #include <ATen/ops/round_ops.h> |
716 | #include <ATen/ops/round_ops.h> |
717 | #include <ATen/ops/round_ops.h> |
718 | #include <ATen/ops/round_ops.h> |
719 | #include <ATen/ops/relu_ops.h> |
720 | #include <ATen/ops/relu_ops.h> |
721 | #include <ATen/ops/gelu_ops.h> |
722 | #include <ATen/ops/gelu_ops.h> |
723 | #include <ATen/ops/gelu_backward_ops.h> |
724 | #include <ATen/ops/hardshrink_ops.h> |
725 | #include <ATen/ops/hardshrink_backward_ops.h> |
726 | #include <ATen/ops/rsqrt_ops.h> |
727 | #include <ATen/ops/rsqrt_ops.h> |
728 | #include <ATen/ops/select_backward_ops.h> |
729 | #include <ATen/ops/celu_ops.h> |
730 | #include <ATen/ops/celu_ops.h> |
731 | #include <ATen/ops/silu_ops.h> |
732 | #include <ATen/ops/silu_ops.h> |
733 | #include <ATen/ops/silu_backward_ops.h> |
734 | #include <ATen/ops/mish_ops.h> |
735 | #include <ATen/ops/mish_ops.h> |
736 | #include <ATen/ops/sigmoid_ops.h> |
737 | #include <ATen/ops/sigmoid_ops.h> |
738 | #include <ATen/ops/logit_ops.h> |
739 | #include <ATen/ops/logit_ops.h> |
740 | #include <ATen/ops/sin_ops.h> |
741 | #include <ATen/ops/sin_ops.h> |
742 | #include <ATen/ops/sinc_ops.h> |
743 | #include <ATen/ops/sinc_ops.h> |
744 | #include <ATen/ops/sinh_ops.h> |
745 | #include <ATen/ops/sinh_ops.h> |
746 | #include <ATen/ops/slice_backward_ops.h> |
747 | #include <ATen/ops/slice_scatter_ops.h> |
748 | #include <ATen/ops/select_scatter_ops.h> |
749 | #include <ATen/ops/diagonal_scatter_ops.h> |
750 | #include <ATen/ops/as_strided_scatter_ops.h> |
751 | #include <ATen/ops/softmax_ops.h> |
752 | #include <ATen/ops/_softmax_ops.h> |
753 | #include <ATen/ops/_softmax_backward_data_ops.h> |
754 | #include <ATen/ops/unsafe_split_ops.h> |
755 | #include <ATen/ops/unsafe_split_with_sizes_ops.h> |
756 | #include <ATen/ops/sspaddmm_ops.h> |
757 | #include <ATen/ops/stack_ops.h> |
758 | #include <ATen/ops/_stack_ops.h> |
759 | #include <ATen/ops/hstack_ops.h> |
760 | #include <ATen/ops/vstack_ops.h> |
761 | #include <ATen/ops/dstack_ops.h> |
762 | #include <ATen/ops/sum_ops.h> |
763 | #include <ATen/ops/sum_ops.h> |
764 | #include <ATen/ops/sum_ops.h> |
765 | #include <ATen/ops/nansum_ops.h> |
766 | #include <ATen/ops/sqrt_ops.h> |
767 | #include <ATen/ops/sqrt_ops.h> |
768 | #include <ATen/ops/square_ops.h> |
769 | #include <ATen/ops/square_ops.h> |
770 | #include <ATen/ops/std_ops.h> |
771 | #include <ATen/ops/std_ops.h> |
772 | #include <ATen/ops/std_mean_ops.h> |
773 | #include <ATen/ops/std_ops.h> |
774 | #include <ATen/ops/std_ops.h> |
775 | #include <ATen/ops/prod_ops.h> |
776 | #include <ATen/ops/prod_ops.h> |
777 | #include <ATen/ops/prod_ops.h> |
778 | #include <ATen/ops/tan_ops.h> |
779 | #include <ATen/ops/tan_ops.h> |
780 | #include <ATen/ops/tanh_ops.h> |
781 | #include <ATen/ops/tanh_ops.h> |
782 | #include <ATen/ops/tensordot_ops.h> |
783 | #include <ATen/ops/threshold_ops.h> |
784 | #include <ATen/ops/threshold_ops.h> |
785 | #include <ATen/ops/threshold_backward_ops.h> |
786 | #include <ATen/ops/_mkldnn_transpose_ops.h> |
787 | #include <ATen/ops/_mkldnn_transpose_ops.h> |
788 | #include <ATen/ops/flip_ops.h> |
789 | #include <ATen/ops/roll_ops.h> |
790 | #include <ATen/ops/rot90_ops.h> |
791 | #include <ATen/ops/_transform_bias_rescale_qkv_ops.h> |
792 | #include <ATen/ops/_nested_tensor_from_mask_ops.h> |
793 | #include <ATen/ops/_nested_from_padded_ops.h> |
794 | #include <ATen/ops/_nested_tensor_size_ops.h> |
795 | #include <ATen/ops/_nested_tensor_strides_ops.h> |
796 | #include <ATen/ops/_nested_from_padded_and_nested_example_ops.h> |
797 | #include <ATen/ops/_nested_view_from_buffer_copy_ops.h> |
798 | #include <ATen/ops/_trilinear_ops.h> |
799 | #include <ATen/ops/trunc_ops.h> |
800 | #include <ATen/ops/trunc_ops.h> |
801 | #include <ATen/ops/fix_ops.h> |
802 | #include <ATen/ops/fix_ops.h> |
803 | #include <ATen/ops/_unique_ops.h> |
804 | #include <ATen/ops/unique_dim_ops.h> |
805 | #include <ATen/ops/unique_consecutive_ops.h> |
806 | #include <ATen/ops/unique_dim_consecutive_ops.h> |
807 | #include <ATen/ops/_unique2_ops.h> |
808 | #include <ATen/ops/_unsafe_view_ops.h> |
809 | #include <ATen/ops/var_ops.h> |
810 | #include <ATen/ops/var_ops.h> |
811 | #include <ATen/ops/var_ops.h> |
812 | #include <ATen/ops/var_ops.h> |
813 | #include <ATen/ops/var_mean_ops.h> |
814 | #include <ATen/ops/where_ops.h> |
815 | #include <ATen/ops/_weight_norm_interface_ops.h> |
816 | #include <ATen/ops/_weight_norm_interface_backward_ops.h> |
817 | #include <ATen/ops/zeros_ops.h> |
818 | #include <ATen/ops/_efficientzerotensor_ops.h> |
819 | #include <ATen/ops/zeros_ops.h> |
820 | #include <ATen/ops/zeros_like_ops.h> |
821 | #include <ATen/ops/_standard_gamma_grad_ops.h> |
822 | #include <ATen/ops/_standard_gamma_ops.h> |
823 | #include <ATen/ops/_dirichlet_grad_ops.h> |
824 | #include <ATen/ops/_sample_dirichlet_ops.h> |
825 | #include <ATen/ops/poisson_ops.h> |
826 | #include <ATen/ops/binomial_ops.h> |
827 | #include <ATen/ops/native_norm_ops.h> |
828 | #include <ATen/ops/native_norm_ops.h> |
829 | #include <ATen/ops/_sparse_sum_ops.h> |
830 | #include <ATen/ops/_sparse_sum_backward_ops.h> |
831 | #include <ATen/ops/_sparse_csr_sum_ops.h> |
832 | #include <ATen/ops/_sparse_csr_prod_ops.h> |
833 | #include <ATen/ops/_sparse_softmax_ops.h> |
834 | #include <ATen/ops/_sparse_softmax_backward_data_ops.h> |
835 | #include <ATen/ops/_sparse_log_softmax_ops.h> |
836 | #include <ATen/ops/_sparse_log_softmax_backward_data_ops.h> |
837 | #include <ATen/ops/_spdiags_ops.h> |
838 | #include <ATen/ops/norm_ops.h> |
839 | #include <ATen/ops/norm_ops.h> |
840 | #include <ATen/ops/norm_ops.h> |
841 | #include <ATen/ops/norm_ops.h> |
842 | #include <ATen/ops/norm_ops.h> |
843 | #include <ATen/ops/norm_ops.h> |
844 | #include <ATen/ops/frexp_ops.h> |
845 | #include <ATen/ops/frobenius_norm_ops.h> |
846 | #include <ATen/ops/nuclear_norm_ops.h> |
847 | #include <ATen/ops/nuclear_norm_ops.h> |
848 | #include <ATen/ops/clone_ops.h> |
849 | #include <ATen/ops/resize_as_ops.h> |
850 | #include <ATen/ops/resize_as_sparse_ops.h> |
851 | #include <ATen/ops/zero_ops.h> |
852 | #include <ATen/ops/sub_ops.h> |
853 | #include <ATen/ops/sub_ops.h> |
854 | #include <ATen/ops/sub_ops.h> |
855 | #include <ATen/ops/sub_ops.h> |
856 | #include <ATen/ops/subtract_ops.h> |
857 | #include <ATen/ops/subtract_ops.h> |
858 | #include <ATen/ops/rsub_ops.h> |
859 | #include <ATen/ops/heaviside_ops.h> |
860 | #include <ATen/ops/heaviside_ops.h> |
861 | #include <ATen/ops/rsub_ops.h> |
862 | #include <ATen/ops/_sparse_addmm_ops.h> |
863 | #include <ATen/ops/sparse_sampled_addmm_ops.h> |
864 | #include <ATen/ops/addmm_ops.h> |
865 | #include <ATen/ops/addmm_ops.h> |
866 | #include <ATen/ops/_addmm_activation_ops.h> |
867 | #include <ATen/ops/sparse_coo_tensor_ops.h> |
868 | #include <ATen/ops/_sparse_coo_tensor_with_dims_ops.h> |
869 | #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h> |
870 | #include <ATen/ops/sparse_resize_ops.h> |
871 | #include <ATen/ops/sparse_resize_and_clear_ops.h> |
872 | #include <ATen/ops/sparse_mask_ops.h> |
873 | #include <ATen/ops/_to_dense_ops.h> |
874 | #include <ATen/ops/_coalesce_ops.h> |
875 | #include <ATen/ops/_coalesced_ops.h> |
876 | #include <ATen/ops/hspmm_ops.h> |
877 | #include <ATen/ops/copy_sparse_to_sparse_ops.h> |
878 | #include <ATen/ops/to_sparse_ops.h> |
879 | #include <ATen/ops/to_sparse_ops.h> |
880 | #include <ATen/ops/to_sparse_csr_ops.h> |
881 | #include <ATen/ops/to_sparse_csc_ops.h> |
882 | #include <ATen/ops/to_sparse_bsr_ops.h> |
883 | #include <ATen/ops/to_sparse_bsc_ops.h> |
884 | #include <ATen/ops/to_mkldnn_ops.h> |
885 | #include <ATen/ops/mkldnn_reorder_conv2d_weight_ops.h> |
886 | #include <ATen/ops/mkldnn_reorder_conv3d_weight_ops.h> |
887 | #include <ATen/ops/quantize_per_tensor_dynamic_ops.h> |
888 | #include <ATen/ops/quantize_per_tensor_ops.h> |
889 | #include <ATen/ops/quantize_per_tensor_ops.h> |
890 | #include <ATen/ops/quantize_per_tensor_ops.h> |
891 | #include <ATen/ops/quantize_per_channel_ops.h> |
892 | #include <ATen/ops/dequantize_ops.h> |
893 | #include <ATen/ops/dequantize_ops.h> |
894 | #include <ATen/ops/q_per_channel_scales_ops.h> |
895 | #include <ATen/ops/q_per_channel_zero_points_ops.h> |
896 | #include <ATen/ops/int_repr_ops.h> |
897 | #include <ATen/ops/_make_per_tensor_quantized_tensor_ops.h> |
898 | #include <ATen/ops/_make_per_channel_quantized_tensor_ops.h> |
899 | #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_ops.h> |
900 | #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h> |
901 | #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_ops.h> |
902 | #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h> |
903 | #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h> |
904 | #include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h> |
905 | #include <ATen/ops/_to_copy_ops.h> |
906 | #include <ATen/ops/_lstm_mps_ops.h> |
907 | #include <ATen/ops/lstm_mps_backward_ops.h> |
908 | #include <ATen/ops/_thnn_fused_lstm_cell_ops.h> |
909 | #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h> |
910 | #include <ATen/ops/_thnn_fused_gru_cell_ops.h> |
911 | #include <ATen/ops/_thnn_fused_gru_cell_backward_ops.h> |
912 | #include <ATen/ops/_pack_padded_sequence_ops.h> |
913 | #include <ATen/ops/set_ops.h> |
914 | #include <ATen/ops/set_ops.h> |
915 | #include <ATen/ops/set_ops.h> |
916 | #include <ATen/ops/set_ops.h> |
917 | #include <ATen/ops/lift_ops.h> |
918 | #include <ATen/ops/lift_fresh_copy_ops.h> |
919 | #include <ATen/ops/masked_fill_ops.h> |
920 | #include <ATen/ops/masked_fill_ops.h> |
921 | #include <ATen/ops/masked_fill_ops.h> |
922 | #include <ATen/ops/masked_fill_ops.h> |
923 | #include <ATen/ops/masked_scatter_ops.h> |
924 | #include <ATen/ops/masked_scatter_ops.h> |
925 | #include <ATen/ops/_masked_softmax_ops.h> |
926 | #include <ATen/ops/_masked_softmax_backward_ops.h> |
927 | #include <ATen/ops/put_ops.h> |
928 | #include <ATen/ops/put_ops.h> |
929 | #include <ATen/ops/index_add_ops.h> |
930 | #include <ATen/ops/index_add_ops.h> |
931 | #include <ATen/ops/index_reduce_ops.h> |
932 | #include <ATen/ops/index_reduce_ops.h> |
933 | #include <ATen/ops/index_fill_ops.h> |
934 | #include <ATen/ops/index_fill_ops.h> |
935 | #include <ATen/ops/index_fill_ops.h> |
936 | #include <ATen/ops/index_fill_ops.h> |
937 | #include <ATen/ops/scatter_ops.h> |
938 | #include <ATen/ops/scatter_ops.h> |
939 | #include <ATen/ops/scatter_ops.h> |
940 | #include <ATen/ops/scatter_ops.h> |
941 | #include <ATen/ops/scatter_ops.h> |
942 | #include <ATen/ops/scatter_ops.h> |
943 | #include <ATen/ops/scatter_ops.h> |
944 | #include <ATen/ops/scatter_ops.h> |
945 | #include <ATen/ops/scatter_add_ops.h> |
946 | #include <ATen/ops/scatter_add_ops.h> |
947 | #include <ATen/ops/scatter_reduce_ops.h> |
948 | #include <ATen/ops/scatter_reduce_ops.h> |
949 | #include <ATen/ops/eq_ops.h> |
950 | #include <ATen/ops/eq_ops.h> |
951 | #include <ATen/ops/eq_ops.h> |
952 | #include <ATen/ops/eq_ops.h> |
953 | #include <ATen/ops/bitwise_and_ops.h> |
954 | #include <ATen/ops/bitwise_and_ops.h> |
955 | #include <ATen/ops/bitwise_and_ops.h> |
956 | #include <ATen/ops/bitwise_and_ops.h> |
957 | #include <ATen/ops/bitwise_and_ops.h> |
958 | #include <ATen/ops/bitwise_or_ops.h> |
959 | #include <ATen/ops/bitwise_or_ops.h> |
960 | #include <ATen/ops/bitwise_or_ops.h> |
961 | #include <ATen/ops/bitwise_or_ops.h> |
962 | #include <ATen/ops/bitwise_or_ops.h> |
963 | #include <ATen/ops/bitwise_xor_ops.h> |
964 | #include <ATen/ops/bitwise_xor_ops.h> |
965 | #include <ATen/ops/bitwise_xor_ops.h> |
966 | #include <ATen/ops/bitwise_xor_ops.h> |
967 | #include <ATen/ops/bitwise_xor_ops.h> |
968 | #include <ATen/ops/lshift_ops.h> |
969 | #include <ATen/ops/lshift_ops.h> |
970 | #include <ATen/ops/lshift_ops.h> |
971 | #include <ATen/ops/lshift_ops.h> |
972 | #include <ATen/ops/bitwise_left_shift_ops.h> |
973 | #include <ATen/ops/bitwise_left_shift_ops.h> |
974 | #include <ATen/ops/bitwise_left_shift_ops.h> |
975 | #include <ATen/ops/bitwise_left_shift_ops.h> |
976 | #include <ATen/ops/bitwise_left_shift_ops.h> |
977 | #include <ATen/ops/rshift_ops.h> |
978 | #include <ATen/ops/rshift_ops.h> |
979 | #include <ATen/ops/rshift_ops.h> |
980 | #include <ATen/ops/rshift_ops.h> |
981 | #include <ATen/ops/bitwise_right_shift_ops.h> |
982 | #include <ATen/ops/bitwise_right_shift_ops.h> |
983 | #include <ATen/ops/bitwise_right_shift_ops.h> |
984 | #include <ATen/ops/bitwise_right_shift_ops.h> |
985 | #include <ATen/ops/bitwise_right_shift_ops.h> |
986 | #include <ATen/ops/tril_ops.h> |
987 | #include <ATen/ops/tril_ops.h> |
988 | #include <ATen/ops/triu_ops.h> |
989 | #include <ATen/ops/triu_ops.h> |
990 | #include <ATen/ops/digamma_ops.h> |
991 | #include <ATen/ops/digamma_ops.h> |
992 | #include <ATen/ops/lerp_ops.h> |
993 | #include <ATen/ops/lerp_ops.h> |
994 | #include <ATen/ops/lerp_ops.h> |
995 | #include <ATen/ops/lerp_ops.h> |
996 | #include <ATen/ops/addbmm_ops.h> |
997 | #include <ATen/ops/addbmm_ops.h> |
998 | #include <ATen/ops/random_ops.h> |
999 | #include <ATen/ops/random_ops.h> |
1000 | #include <ATen/ops/random_ops.h> |
1001 | #include <ATen/ops/uniform_ops.h> |
1002 | #include <ATen/ops/cauchy_ops.h> |
1003 | #include <ATen/ops/log_normal_ops.h> |
1004 | #include <ATen/ops/exponential_ops.h> |
1005 | #include <ATen/ops/geometric_ops.h> |
1006 | #include <ATen/ops/diag_ops.h> |
1007 | #include <ATen/ops/cross_ops.h> |
1008 | #include <ATen/ops/tril_indices_ops.h> |
1009 | #include <ATen/ops/triu_indices_ops.h> |
1010 | #include <ATen/ops/trace_ops.h> |
1011 | #include <ATen/ops/ne_ops.h> |
1012 | #include <ATen/ops/ne_ops.h> |
1013 | #include <ATen/ops/ne_ops.h> |
1014 | #include <ATen/ops/ne_ops.h> |
1015 | #include <ATen/ops/not_equal_ops.h> |
1016 | #include <ATen/ops/not_equal_ops.h> |
1017 | #include <ATen/ops/not_equal_ops.h> |
1018 | #include <ATen/ops/not_equal_ops.h> |
1019 | #include <ATen/ops/ge_ops.h> |
1020 | #include <ATen/ops/ge_ops.h> |
1021 | #include <ATen/ops/ge_ops.h> |
1022 | #include <ATen/ops/ge_ops.h> |
1023 | #include <ATen/ops/greater_equal_ops.h> |
1024 | #include <ATen/ops/greater_equal_ops.h> |
1025 | #include <ATen/ops/greater_equal_ops.h> |
1026 | #include <ATen/ops/greater_equal_ops.h> |
1027 | #include <ATen/ops/le_ops.h> |
1028 | #include <ATen/ops/le_ops.h> |
1029 | #include <ATen/ops/le_ops.h> |
1030 | #include <ATen/ops/le_ops.h> |
1031 | #include <ATen/ops/less_equal_ops.h> |
1032 | #include <ATen/ops/less_equal_ops.h> |
1033 | #include <ATen/ops/less_equal_ops.h> |
1034 | #include <ATen/ops/less_equal_ops.h> |
1035 | #include <ATen/ops/gt_ops.h> |
1036 | #include <ATen/ops/gt_ops.h> |
1037 | #include <ATen/ops/gt_ops.h> |
1038 | #include <ATen/ops/gt_ops.h> |
1039 | #include <ATen/ops/greater_ops.h> |
1040 | #include <ATen/ops/greater_ops.h> |
1041 | #include <ATen/ops/greater_ops.h> |
1042 | #include <ATen/ops/greater_ops.h> |
1043 | #include <ATen/ops/lt_ops.h> |
1044 | #include <ATen/ops/lt_ops.h> |
1045 | #include <ATen/ops/lt_ops.h> |
1046 | #include <ATen/ops/lt_ops.h> |
1047 | #include <ATen/ops/less_ops.h> |
1048 | #include <ATen/ops/less_ops.h> |
1049 | #include <ATen/ops/less_ops.h> |
1050 | #include <ATen/ops/less_ops.h> |
1051 | #include <ATen/ops/take_ops.h> |
1052 | #include <ATen/ops/take_along_dim_ops.h> |
1053 | #include <ATen/ops/index_select_ops.h> |
1054 | #include <ATen/ops/index_select_ops.h> |
1055 | #include <ATen/ops/masked_select_ops.h> |
1056 | #include <ATen/ops/nonzero_ops.h> |
1057 | #include <ATen/ops/gather_ops.h> |
1058 | #include <ATen/ops/gather_ops.h> |
1059 | #include <ATen/ops/addcmul_ops.h> |
1060 | #include <ATen/ops/addcmul_ops.h> |
1061 | #include <ATen/ops/addcdiv_ops.h> |
1062 | #include <ATen/ops/addcdiv_ops.h> |
1063 | #include <ATen/ops/triangular_solve_ops.h> |
1064 | #include <ATen/ops/linalg_solve_triangular_ops.h> |
1065 | #include <ATen/ops/svd_ops.h> |
1066 | #include <ATen/ops/cholesky_ops.h> |
1067 | #include <ATen/ops/cholesky_solve_ops.h> |
1068 | #include <ATen/ops/_cholesky_solve_helper_ops.h> |
1069 | #include <ATen/ops/cholesky_inverse_ops.h> |
1070 | #include <ATen/ops/qr_ops.h> |
1071 | #include <ATen/ops/geqrf_ops.h> |
1072 | #include <ATen/ops/orgqr_ops.h> |
1073 | #include <ATen/ops/ormqr_ops.h> |
1074 | #include <ATen/ops/lu_solve_ops.h> |
1075 | #include <ATen/ops/lu_unpack_ops.h> |
1076 | #include <ATen/ops/multinomial_ops.h> |
1077 | #include <ATen/ops/lgamma_ops.h> |
1078 | #include <ATen/ops/lgamma_ops.h> |
1079 | #include <ATen/ops/polygamma_ops.h> |
1080 | #include <ATen/ops/erfinv_ops.h> |
1081 | #include <ATen/ops/erfinv_ops.h> |
1082 | #include <ATen/ops/i0_ops.h> |
1083 | #include <ATen/ops/i0_ops.h> |
1084 | #include <ATen/ops/sign_ops.h> |
1085 | #include <ATen/ops/sign_ops.h> |
1086 | #include <ATen/ops/signbit_ops.h> |
1087 | #include <ATen/ops/dist_ops.h> |
1088 | #include <ATen/ops/atan2_ops.h> |
1089 | #include <ATen/ops/atan2_ops.h> |
1090 | #include <ATen/ops/arctan2_ops.h> |
1091 | #include <ATen/ops/arctan2_ops.h> |
1092 | #include <ATen/ops/histc_ops.h> |
1093 | #include <ATen/ops/histogram_ops.h> |
1094 | #include <ATen/ops/histogram_ops.h> |
1095 | #include <ATen/ops/_histogramdd_bin_edges_ops.h> |
1096 | #include <ATen/ops/_histogramdd_from_bin_cts_ops.h> |
1097 | #include <ATen/ops/_histogramdd_from_bin_tensors_ops.h> |
1098 | #include <ATen/ops/fmod_ops.h> |
1099 | #include <ATen/ops/fmod_ops.h> |
1100 | #include <ATen/ops/fmod_ops.h> |
1101 | #include <ATen/ops/fmod_ops.h> |
1102 | #include <ATen/ops/hypot_ops.h> |
1103 | #include <ATen/ops/hypot_ops.h> |
1104 | #include <ATen/ops/igamma_ops.h> |
1105 | #include <ATen/ops/igamma_ops.h> |
1106 | #include <ATen/ops/igammac_ops.h> |
1107 | #include <ATen/ops/igammac_ops.h> |
1108 | #include <ATen/ops/nextafter_ops.h> |
1109 | #include <ATen/ops/nextafter_ops.h> |
1110 | #include <ATen/ops/remainder_ops.h> |
1111 | #include <ATen/ops/remainder_ops.h> |
1112 | #include <ATen/ops/remainder_ops.h> |
1113 | #include <ATen/ops/remainder_ops.h> |
1114 | #include <ATen/ops/remainder_ops.h> |
1115 | #include <ATen/ops/fmin_ops.h> |
1116 | #include <ATen/ops/max_ops.h> |
1117 | #include <ATen/ops/fmax_ops.h> |
1118 | #include <ATen/ops/maximum_ops.h> |
1119 | #include <ATen/ops/max_ops.h> |
1120 | #include <ATen/ops/minimum_ops.h> |
1121 | #include <ATen/ops/min_ops.h> |
1122 | #include <ATen/ops/quantile_ops.h> |
1123 | #include <ATen/ops/quantile_ops.h> |
1124 | #include <ATen/ops/nanquantile_ops.h> |
1125 | #include <ATen/ops/nanquantile_ops.h> |
1126 | #include <ATen/ops/sort_ops.h> |
1127 | #include <ATen/ops/sort_ops.h> |
1128 | #include <ATen/ops/sort_ops.h> |
1129 | #include <ATen/ops/sort_ops.h> |
1130 | #include <ATen/ops/msort_ops.h> |
1131 | #include <ATen/ops/argsort_ops.h> |
1132 | #include <ATen/ops/topk_ops.h> |
1133 | #include <ATen/ops/all_ops.h> |
1134 | #include <ATen/ops/any_ops.h> |
1135 | #include <ATen/ops/renorm_ops.h> |
1136 | #include <ATen/ops/renorm_ops.h> |
1137 | #include <ATen/ops/unfold_backward_ops.h> |
1138 | #include <ATen/ops/pow_ops.h> |
1139 | #include <ATen/ops/pow_ops.h> |
1140 | #include <ATen/ops/pow_ops.h> |
1141 | #include <ATen/ops/pow_ops.h> |
1142 | #include <ATen/ops/pow_ops.h> |
1143 | #include <ATen/ops/float_power_ops.h> |
1144 | #include <ATen/ops/float_power_ops.h> |
1145 | #include <ATen/ops/float_power_ops.h> |
1146 | #include <ATen/ops/float_power_ops.h> |
1147 | #include <ATen/ops/float_power_ops.h> |
1148 | #include <ATen/ops/normal_ops.h> |
1149 | #include <ATen/ops/normal_ops.h> |
1150 | #include <ATen/ops/normal_ops.h> |
1151 | #include <ATen/ops/normal_ops.h> |
1152 | #include <ATen/ops/normal_ops.h> |
1153 | #include <ATen/ops/normal_ops.h> |
1154 | #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h> |
1155 | #include <ATen/ops/_amp_update_scale_ops.h> |
1156 | #include <ATen/ops/_foreach_add_ops.h> |
1157 | #include <ATen/ops/_foreach_add_ops.h> |
1158 | #include <ATen/ops/_foreach_sub_ops.h> |
1159 | #include <ATen/ops/_foreach_sub_ops.h> |
1160 | #include <ATen/ops/_foreach_mul_ops.h> |
1161 | #include <ATen/ops/_foreach_mul_ops.h> |
1162 | #include <ATen/ops/_foreach_div_ops.h> |
1163 | #include <ATen/ops/_foreach_div_ops.h> |
1164 | #include <ATen/ops/_foreach_clamp_min_ops.h> |
1165 | #include <ATen/ops/_foreach_clamp_min_ops.h> |
1166 | #include <ATen/ops/_foreach_clamp_max_ops.h> |
1167 | #include <ATen/ops/_foreach_clamp_max_ops.h> |
1168 | #include <ATen/ops/_foreach_maximum_ops.h> |
1169 | #include <ATen/ops/_foreach_maximum_ops.h> |
1170 | #include <ATen/ops/_foreach_minimum_ops.h> |
1171 | #include <ATen/ops/_foreach_minimum_ops.h> |
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_exp_ops.h>
#include <ATen/ops/_foreach_zero_ops.h>
#include <ATen/ops/_foreach_sqrt_ops.h>
#include <ATen/ops/_foreach_abs_ops.h>
#include <ATen/ops/_foreach_acos_ops.h>
#include <ATen/ops/_foreach_asin_ops.h>
#include <ATen/ops/_foreach_atan_ops.h>
#include <ATen/ops/_foreach_ceil_ops.h>
#include <ATen/ops/_foreach_cos_ops.h>
#include <ATen/ops/_foreach_cosh_ops.h>
#include <ATen/ops/_foreach_erf_ops.h>
#include <ATen/ops/_foreach_erfc_ops.h>
#include <ATen/ops/_foreach_expm1_ops.h>
#include <ATen/ops/_foreach_floor_ops.h>
#include <ATen/ops/_foreach_log_ops.h>
#include <ATen/ops/_foreach_log10_ops.h>
#include <ATen/ops/_foreach_log1p_ops.h>
#include <ATen/ops/_foreach_log2_ops.h>
#include <ATen/ops/_foreach_neg_ops.h>
#include <ATen/ops/_foreach_tan_ops.h>
#include <ATen/ops/_foreach_tanh_ops.h>
#include <ATen/ops/_foreach_sin_ops.h>
#include <ATen/ops/_foreach_sinh_ops.h>
#include <ATen/ops/_foreach_round_ops.h>
#include <ATen/ops/_foreach_lgamma_ops.h>
#include <ATen/ops/_foreach_frac_ops.h>
#include <ATen/ops/_foreach_reciprocal_ops.h>
#include <ATen/ops/_foreach_sigmoid_ops.h>
#include <ATen/ops/_foreach_trunc_ops.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_norm_ops.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/bucketize_ops.h>
#include <ATen/ops/searchsorted_ops.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_ops.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_ops.h>
#include <ATen/ops/mse_loss_ops.h>
#include <ATen/ops/mse_loss_backward_ops.h>
#include <ATen/ops/multi_margin_loss_ops.h>
#include <ATen/ops/multi_margin_loss_backward_ops.h>
#include <ATen/ops/multilabel_margin_loss_ops.h>
#include <ATen/ops/multilabel_margin_loss_forward_ops.h>
#include <ATen/ops/multilabel_margin_loss_backward_ops.h>
#include <ATen/ops/nll_loss_ops.h>
#include <ATen/ops/nll_loss_forward_ops.h>
#include <ATen/ops/nll_loss_backward_ops.h>
#include <ATen/ops/nll_loss2d_ops.h>
#include <ATen/ops/nll_loss2d_forward_ops.h>
#include <ATen/ops/nll_loss2d_backward_ops.h>
#include <ATen/ops/smooth_l1_loss_ops.h>
#include <ATen/ops/smooth_l1_loss_backward_ops.h>
#include <ATen/ops/huber_loss_ops.h>
#include <ATen/ops/huber_loss_backward_ops.h>
#include <ATen/ops/soft_margin_loss_ops.h>
#include <ATen/ops/soft_margin_loss_backward_ops.h>
#include <ATen/ops/elu_ops.h>
#include <ATen/ops/elu_backward_ops.h>
#include <ATen/ops/glu_ops.h>
#include <ATen/ops/glu_backward_ops.h>
#include <ATen/ops/glu_jvp_ops.h>
#include <ATen/ops/glu_backward_jvp_ops.h>
#include <ATen/ops/hardsigmoid_ops.h>
#include <ATen/ops/hardsigmoid_backward_ops.h>
#include <ATen/ops/hardtanh_ops.h>
#include <ATen/ops/hardtanh_backward_ops.h>
#include <ATen/ops/hardswish_ops.h>
#include <ATen/ops/hardswish_backward_ops.h>
#include <ATen/ops/leaky_relu_ops.h>
#include <ATen/ops/leaky_relu_backward_ops.h>
#include <ATen/ops/log_sigmoid_ops.h>
#include <ATen/ops/log_sigmoid_forward_ops.h>
#include <ATen/ops/log_sigmoid_backward_ops.h>
#include <ATen/ops/rrelu_with_noise_ops.h>
#include <ATen/ops/rrelu_with_noise_backward_ops.h>
#include <ATen/ops/softplus_ops.h>
#include <ATen/ops/softplus_backward_ops.h>
#include <ATen/ops/softshrink_ops.h>
#include <ATen/ops/softshrink_backward_ops.h>
#include <ATen/ops/adaptive_avg_pool2d_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/adaptive_avg_pool3d_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_ops.h>
#include <ATen/ops/adaptive_max_pool2d_ops.h>
#include <ATen/ops/adaptive_max_pool2d_backward_ops.h>
#include <ATen/ops/adaptive_max_pool3d_ops.h>
#include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
#include <ATen/ops/avg_pool2d_ops.h>
#include <ATen/ops/avg_pool2d_backward_ops.h>
#include <ATen/ops/avg_pool3d_ops.h>
#include <ATen/ops/avg_pool3d_backward_ops.h>
#include <ATen/ops/fractional_max_pool2d_ops.h>
#include <ATen/ops/fractional_max_pool2d_backward_ops.h>
#include <ATen/ops/fractional_max_pool3d_ops.h>
#include <ATen/ops/fractional_max_pool3d_backward_ops.h>
#include <ATen/ops/max_pool2d_with_indices_ops.h>
#include <ATen/ops/max_pool2d_with_indices_backward_ops.h>
#include <ATen/ops/max_pool3d_with_indices_ops.h>
#include <ATen/ops/max_pool3d_with_indices_backward_ops.h>
#include <ATen/ops/max_unpool2d_ops.h>
#include <ATen/ops/max_unpool3d_ops.h>
#include <ATen/ops/reflection_pad1d_ops.h>
#include <ATen/ops/reflection_pad1d_backward_ops.h>
#include <ATen/ops/reflection_pad2d_ops.h>
#include <ATen/ops/reflection_pad2d_backward_ops.h>
#include <ATen/ops/reflection_pad3d_ops.h>
#include <ATen/ops/reflection_pad3d_backward_ops.h>
#include <ATen/ops/replication_pad1d_ops.h>
#include <ATen/ops/replication_pad1d_backward_ops.h>
#include <ATen/ops/replication_pad2d_ops.h>
#include <ATen/ops/replication_pad2d_backward_ops.h>
#include <ATen/ops/replication_pad3d_ops.h>
#include <ATen/ops/replication_pad3d_backward_ops.h>
#include <ATen/ops/upsample_linear1d_ops.h>
#include <ATen/ops/upsample_linear1d_backward_ops.h>
#include <ATen/ops/upsample_bilinear2d_ops.h>
#include <ATen/ops/upsample_bilinear2d_backward_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_ops.h>
#include <ATen/ops/upsample_bicubic2d_ops.h>
#include <ATen/ops/upsample_bicubic2d_backward_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_ops.h>
#include <ATen/ops/upsample_trilinear3d_ops.h>
#include <ATen/ops/upsample_trilinear3d_backward_ops.h>
#include <ATen/ops/upsample_nearest1d_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_ops.h>
#include <ATen/ops/upsample_nearest1d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_ops.h>
#include <ATen/ops/upsample_nearest2d_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_ops.h>
#include <ATen/ops/upsample_nearest2d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_ops.h>
#include <ATen/ops/upsample_nearest3d_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_ops.h>
#include <ATen/ops/upsample_nearest3d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_ops.h>
#include <ATen/ops/sigmoid_backward_ops.h>
#include <ATen/ops/logit_backward_ops.h>
#include <ATen/ops/tanh_backward_ops.h>
#include <ATen/ops/slow_conv_transpose2d_ops.h>
#include <ATen/ops/slow_conv_transpose3d_ops.h>
#include <ATen/ops/thnn_conv2d_ops.h>
#include <ATen/ops/_slow_conv2d_forward_ops.h>
#include <ATen/ops/_slow_conv2d_backward_ops.h>
#include <ATen/ops/_conv_depthwise2d_ops.h>
#include <ATen/ops/conv_depthwise3d_ops.h>
#include <ATen/ops/slow_conv3d_ops.h>
#include <ATen/ops/slow_conv3d_forward_ops.h>
#include <ATen/ops/slow_conv_dilated2d_ops.h>
#include <ATen/ops/slow_conv_dilated3d_ops.h>
#include <ATen/ops/col2im_ops.h>
#include <ATen/ops/column_stack_ops.h>
#include <ATen/ops/im2col_ops.h>
#include <ATen/ops/isinf_ops.h>
#include <ATen/ops/isposinf_ops.h>
#include <ATen/ops/isneginf_ops.h>
#include <ATen/ops/special_entr_ops.h>
#include <ATen/ops/special_ndtri_ops.h>
#include <ATen/ops/special_log_ndtr_ops.h>
#include <ATen/ops/special_expm1_ops.h>
#include <ATen/ops/special_exp2_ops.h>
#include <ATen/ops/special_psi_ops.h>
#include <ATen/ops/special_digamma_ops.h>
#include <ATen/ops/special_gammaln_ops.h>
#include <ATen/ops/special_erf_ops.h>
#include <ATen/ops/special_erfc_ops.h>
#include <ATen/ops/special_erfcx_ops.h>
#include <ATen/ops/special_erfinv_ops.h>
#include <ATen/ops/special_ndtr_ops.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/special_i0_ops.h>
#include <ATen/ops/special_i0e_ops.h>
#include <ATen/ops/special_i1_ops.h>
#include <ATen/ops/special_i1e_ops.h>
#include <ATen/ops/special_logit_ops.h>
#include <ATen/ops/special_polygamma_ops.h>
#include <ATen/ops/special_logsumexp_ops.h>
#include <ATen/ops/special_expit_ops.h>
#include <ATen/ops/special_sinc_ops.h>
#include <ATen/ops/special_round_ops.h>
#include <ATen/ops/special_log1p_ops.h>
#include <ATen/ops/special_gammainc_ops.h>
#include <ATen/ops/special_gammaincc_ops.h>
#include <ATen/ops/special_multigammaln_ops.h>
#include <ATen/ops/fft_fft_ops.h>
#include <ATen/ops/fft_ifft_ops.h>
#include <ATen/ops/fft_rfft_ops.h>
#include <ATen/ops/fft_irfft_ops.h>
#include <ATen/ops/fft_hfft_ops.h>
#include <ATen/ops/fft_ihfft_ops.h>
#include <ATen/ops/fft_fft2_ops.h>
#include <ATen/ops/fft_ifft2_ops.h>
#include <ATen/ops/fft_rfft2_ops.h>
#include <ATen/ops/fft_irfft2_ops.h>
#include <ATen/ops/fft_hfft2_ops.h>
#include <ATen/ops/fft_ihfft2_ops.h>
#include <ATen/ops/fft_fftn_ops.h>
#include <ATen/ops/fft_ifftn_ops.h>
#include <ATen/ops/fft_rfftn_ops.h>
#include <ATen/ops/fft_irfftn_ops.h>
#include <ATen/ops/fft_hfftn_ops.h>
#include <ATen/ops/fft_ihfftn_ops.h>
#include <ATen/ops/fft_fftfreq_ops.h>
#include <ATen/ops/fft_rfftfreq_ops.h>
#include <ATen/ops/linalg_cholesky_ex_ops.h>
#include <ATen/ops/linalg_cholesky_ops.h>
#include <ATen/ops/linalg_cross_ops.h>
#include <ATen/ops/linalg_lu_factor_ops.h>
#include <ATen/ops/linalg_lu_factor_ex_ops.h>
#include <ATen/ops/linalg_lu_ops.h>
#include <ATen/ops/linalg_lu_solve_ops.h>
#include <ATen/ops/_linalg_det_ops.h>
#include <ATen/ops/linalg_det_ops.h>
#include <ATen/ops/linalg_ldl_factor_ex_ops.h>
#include <ATen/ops/linalg_ldl_factor_ops.h>
#include <ATen/ops/linalg_ldl_solve_ops.h>
#include <ATen/ops/linalg_lstsq_ops.h>
#include <ATen/ops/linalg_matmul_ops.h>
#include <ATen/ops/linalg_vecdot_ops.h>
#include <ATen/ops/linalg_matrix_exp_ops.h>
#include <ATen/ops/_linalg_slogdet_ops.h>
#include <ATen/ops/linalg_slogdet_ops.h>
#include <ATen/ops/slogdet_ops.h>
#include <ATen/ops/linalg_eig_ops.h>
#include <ATen/ops/linalg_eigvals_ops.h>
#include <ATen/ops/_linalg_eigh_ops.h>
#include <ATen/ops/linalg_eigh_ops.h>
#include <ATen/ops/linalg_eigvalsh_ops.h>
#include <ATen/ops/linalg_householder_product_ops.h>
#include <ATen/ops/linalg_inv_ex_ops.h>
#include <ATen/ops/linalg_inv_ops.h>
#include <ATen/ops/inverse_ops.h>
#include <ATen/ops/inner_ops.h>
#include <ATen/ops/outer_ops.h>
#include <ATen/ops/ger_ops.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_vector_norm_ops.h>
#include <ATen/ops/linalg_matrix_norm_ops.h>
#include <ATen/ops/_linalg_svd_ops.h>
#include <ATen/ops/linalg_svd_ops.h>
#include <ATen/ops/linalg_svdvals_ops.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/_linalg_solve_ex_ops.h>
#include <ATen/ops/linalg_solve_ex_ops.h>
#include <ATen/ops/linalg_solve_ops.h>
#include <ATen/ops/linalg_tensorinv_ops.h>
#include <ATen/ops/linalg_tensorsolve_ops.h>
#include <ATen/ops/linalg_qr_ops.h>
#include <ATen/ops/linalg_matrix_power_ops.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_multi_dot_ops.h>
#include <ATen/ops/_test_optional_intlist_ops.h>
#include <ATen/ops/_test_optional_filled_intlist_ops.h>
#include <ATen/ops/_test_optional_floatlist_ops.h>
#include <ATen/ops/_test_warn_in_autograd_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h>
#include <ATen/ops/segment_reduce_ops.h>
#include <ATen/ops/_segment_reduce_backward_ops.h>
#include <ATen/ops/_nested_tensor_from_tensor_list_ops.h>
#include <ATen/ops/_fw_primal_copy_ops.h>
#include <ATen/ops/_make_dual_copy_ops.h>
#include <ATen/ops/view_as_real_copy_ops.h>
#include <ATen/ops/view_as_complex_copy_ops.h>
#include <ATen/ops/_conj_copy_ops.h>
#include <ATen/ops/_neg_view_copy_ops.h>
#include <ATen/ops/as_strided_copy_ops.h>
#include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
#include <ATen/ops/diagonal_copy_ops.h>
#include <ATen/ops/expand_copy_ops.h>
#include <ATen/ops/permute_copy_ops.h>
#include <ATen/ops/_reshape_alias_copy_ops.h>
#include <ATen/ops/select_copy_ops.h>
#include <ATen/ops/detach_copy_ops.h>
#include <ATen/ops/slice_copy_ops.h>
#include <ATen/ops/split_copy_ops.h>
#include <ATen/ops/split_with_sizes_copy_ops.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/t_copy_ops.h>
#include <ATen/ops/transpose_copy_ops.h>
#include <ATen/ops/unsqueeze_copy_ops.h>
#include <ATen/ops/_indices_copy_ops.h>
#include <ATen/ops/_values_copy_ops.h>
#include <ATen/ops/indices_copy_ops.h>
#include <ATen/ops/values_copy_ops.h>
#include <ATen/ops/crow_indices_copy_ops.h>
#include <ATen/ops/col_indices_copy_ops.h>
#include <ATen/ops/ccol_indices_copy_ops.h>
#include <ATen/ops/row_indices_copy_ops.h>
#include <ATen/ops/unbind_copy_ops.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/unfold_copy_ops.h>
#include <ATen/ops/alias_copy_ops.h>
#include <ATen/ops/to_padded_tensor_ops.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_ops.h>
#include <ATen/ops/_native_multi_head_attention_ops.h>
#include <ATen/ops/_triton_scaled_dot_attention_ops.h>
#include <ATen/ops/_triton_multi_head_attention_ops.h>
#include <ATen/ops/special_airy_ai_ops.h>
#include <ATen/ops/_transformer_decoder_only_layer_fwd_ops.h>
#include <ATen/ops/_native_decoder_only_multi_head_attention_ops.h>
#include <ATen/ops/special_bessel_j0_ops.h>
#include <ATen/ops/special_bessel_j1_ops.h>
#include <ATen/ops/special_bessel_y0_ops.h>
#include <ATen/ops/special_bessel_y1_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_hermite_polynomial_h_ops.h>
#include <ATen/ops/special_hermite_polynomial_he_ops.h>
#include <ATen/ops/special_laguerre_polynomial_l_ops.h>
#include <ATen/ops/special_legendre_polynomial_p_ops.h>
#include <ATen/ops/special_modified_bessel_i0_ops.h>
#include <ATen/ops/special_modified_bessel_i1_ops.h>
#include <ATen/ops/special_modified_bessel_k0_ops.h>
#include <ATen/ops/special_modified_bessel_k1_ops.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_ops.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_spherical_bessel_j0_ops.h>
#include <ATen/ops/_foobar_ops.h>
#include <ATen/ops/_fused_adam_ops.h>
#include <ATen/ops/_fused_adamw_ops.h>
#endif

namespace at {
namespace native {

// This file contains a number of kernels for aten functions that are fully code-generated.
// TODO: rename this file to something more generic.
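//
// Three kinds of kernels are defined below:
//   1. {view}_copy kernels: call the underlying view op and return a contiguous
//      clone, so the output owns its storage instead of aliasing the input.
//   2. Functional variants of in-place ops: clone the tensor(s) the in-place op
//      would mutate, run the op on the clone(s), and return the clone(s).
//   3. {op}_out wrappers: call the functional op, then resize the caller's out
//      tensor(s) and copy the result into them.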

at::Tensor clone_arg(const at::Tensor& t) {
  return t.clone();
}

std::vector<at::Tensor> clone_arg(const at::TensorList& t_list) {
  std::vector<at::Tensor> out(t_list.size());
  for (const auto& i : c10::irange(t_list.size())) {
    out[i] = t_list[i].clone();
  }
  return out;
}

// duped with gen_resize_out_helper from structured kernels
void copy_arg(const at::Tensor& dst, const at::Tensor& src) {
  TORCH_CHECK(src.dtype() == dst.dtype(),
      "Expected out tensor to have dtype ", src.dtype(), ", but got ", dst.dtype(), " instead");
  TORCH_CHECK(src.device() == dst.device(),
      "Expected out tensor to have device ", src.device(), ", but got ", dst.device(), " instead");
  dst.copy_(src);
}

void copy_arg(const at::TensorList& dst, const at::TensorList& src) {
  TORCH_INTERNAL_ASSERT(dst.size() == src.size());
  for (const auto& i : c10::irange(dst.size())) {
    copy_arg(dst[i], src[i]);
  }
}

// TODO: this doesn't handle restriding empty tensors correctly; see
// gen_resize_out_helper for the correct algorithm
void resize_out_helper(const at::Tensor& dst, const at::Tensor& src) {
  at::native::resize_output(dst, src.sizes());
}

void resize_out_helper(const at::TensorList& dst, const at::TensorList& src) {
  TORCH_INTERNAL_ASSERT(dst.size() == src.size());
  for (const auto& i : c10::irange(dst.size())) {
    at::native::resize_output(dst[i], src[i].sizes());
  }
}

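// View-copy kernels: value-equal, non-aliasing versions of view ops. For
// example, transpose_copy_int(t, 0, 1) returns the same values as
// t.transpose(0, 1), but in freshly allocated contiguous storage, so mutating
// the result never affects t.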
at::Tensor _fw_primal_copy(const at::Tensor & self, int64_t level) {
  auto output = at::_ops::_fw_primal::call(self, level);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _make_dual_copy(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
  auto output = at::_ops::_make_dual::call(primal, tangent, level);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor view_as_real_copy(const at::Tensor & self) {
  auto output = at::_ops::view_as_real::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor view_as_complex_copy(const at::Tensor & self) {
  auto output = at::_ops::view_as_complex::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _conj_copy(const at::Tensor & self) {
  auto output = at::_ops::_conj::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _neg_view_copy(const at::Tensor & self) {
  auto output = at::_ops::_neg_view::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor as_strided_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
  auto output = at::_ops::as_strided::call(self, size, stride, storage_offset);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _sparse_broadcast_to_copy(const at::Tensor & self, at::IntArrayRef size) {
  auto output = at::_ops::_sparse_broadcast_to::call(self, size);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor diagonal_copy(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
  auto output = at::_ops::diagonal::call(self, offset, dim1, dim2);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor expand_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
  auto output = at::_ops::expand::call(self, size, implicit);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor narrow_copy_dense_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  auto output = at::_ops::narrow::call(self, dim, start, length);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor permute_copy(const at::Tensor & self, at::IntArrayRef dims) {
  auto output = at::_ops::permute::call(self, dims);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _reshape_alias_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  auto output = at::_ops::_reshape_alias::call(self, size, stride);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  auto output = at::_ops::select_int::call(self, dim, index);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor detach_copy(const at::Tensor & self) {
  auto output = at::_ops::detach::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor slice_copy_Tensor_symint(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
  auto output = at::_ops::slice_Tensor::call(self, dim, start, end, step);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

::std::vector<at::Tensor> split_copy_Tensor_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
  auto output = at::_ops::split_Tensor::call(self, split_size, dim);
  ::std::vector<at::Tensor> out_clone;
  for (const auto i : c10::irange(output.size())) {
    out_clone.push_back(output[i].clone(/*memory_format=*/at::MemoryFormat::Contiguous));
  }
  return out_clone;
}

::std::vector<at::Tensor> split_with_sizes_copy_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
  auto output = at::_ops::split_with_sizes::call(self, split_sizes, dim);
  ::std::vector<at::Tensor> out_clone;
  for (const auto i : c10::irange(output.size())) {
    out_clone.push_back(output[i].clone(/*memory_format=*/at::MemoryFormat::Contiguous));
  }
  return out_clone;
}

at::Tensor squeeze_copy(const at::Tensor & self) {
  auto output = at::_ops::squeeze::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor squeeze_copy_dim(const at::Tensor & self, int64_t dim) {
  auto output = at::_ops::squeeze_dim::call(self, dim);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor squeeze_copy_dims(const at::Tensor & self, at::IntArrayRef dim) {
  auto output = at::_ops::squeeze_dims::call(self, dim);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor t_copy(const at::Tensor & self) {
  auto output = at::_ops::t::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor transpose_copy_int(const at::Tensor & self, int64_t dim0, int64_t dim1) {
  auto output = at::_ops::transpose_int::call(self, dim0, dim1);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _nested_view_from_buffer_copy(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
  auto output = at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor unsqueeze_copy(const at::Tensor & self, int64_t dim) {
  auto output = at::_ops::unsqueeze::call(self, dim);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _indices_copy(const at::Tensor & self) {
  auto output = at::_ops::_indices::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _values_copy(const at::Tensor & self) {
  auto output = at::_ops::_values::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor indices_copy(const at::Tensor & self) {
  auto output = at::_ops::indices::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor values_copy(const at::Tensor & self) {
  auto output = at::_ops::values::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor crow_indices_copy(const at::Tensor & self) {
  auto output = at::_ops::crow_indices::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor col_indices_copy(const at::Tensor & self) {
  auto output = at::_ops::col_indices::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor ccol_indices_copy(const at::Tensor & self) {
  auto output = at::_ops::ccol_indices::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor row_indices_copy(const at::Tensor & self) {
  auto output = at::_ops::row_indices::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

::std::vector<at::Tensor> unbind_copy_int(const at::Tensor & self, int64_t dim) {
  auto output = at::_ops::unbind_int::call(self, dim);
  ::std::vector<at::Tensor> out_clone;
  for (const auto i : c10::irange(output.size())) {
    out_clone.push_back(output[i].clone(/*memory_format=*/at::MemoryFormat::Contiguous));
  }
  return out_clone;
}

at::Tensor lift_fresh_copy(const at::Tensor & self) {
  auto output = at::_ops::lift_fresh::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}
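
// view_copy cannot unconditionally clone the view: when `size` is not
// viewable given self's sizes and strides, computeStride() returns nullopt,
// so we fall back to reshape, which performs the copy itself.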
at::Tensor view_copy_symint(const at::Tensor & self, at::SymIntArrayRef size) {
  c10::SymDimVector shape = infer_size_dv(size, self.sym_numel());
  if (!at::detail::computeStride(self.sym_sizes(), self.sym_strides(), shape).has_value()) {
    return self.reshape_symint(size);
  } else {
    auto output = at::_ops::view::call(self, size);
    return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
  }
}

at::Tensor view_copy_dtype(const at::Tensor & self, at::ScalarType dtype) {
  auto output = at::_ops::view_dtype::call(self, dtype);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor unfold_copy(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
  auto output = at::_ops::unfold::call(self, dimension, size, step);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor alias_copy(const at::Tensor & self) {
  auto output = at::_ops::alias::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

at::Tensor _test_autograd_multiple_dispatch_view_copy(const at::Tensor & self) {
  auto output = at::_ops::_test_autograd_multiple_dispatch_view::call(self);
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}

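// Functional variants of in-place ops: each clones the mutable argument(s),
// runs the underlying in-place op on the clone(s), and returns the clone(s),
// leaving the original inputs untouched. Variants with extra mutated buffers
// (e.g. _native_batch_norm_legit_functional) return the updated clones as
// additional tuple elements.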
at::Tensor bernoulli(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::bernoulli__Tensor::call(const_cast<Tensor&>(self_clone), p, generator);
  return self_clone;
}

at::Tensor embedding_renorm(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::embedding_renorm_::call(const_cast<Tensor&>(self_clone), indices, max_norm, norm_type);
  return self_clone;
}

at::Tensor resize_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::resize_::call(self_clone, size, memory_format);
  return self_clone;
}

at::Tensor _resize_output(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::_resize_output_::call(self_clone, size, device);
  return self_clone;
}

at::Tensor _index_put_impl(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::_index_put_impl_::call(const_cast<Tensor&>(self_clone), indices, values, accumulate, unsafe);
  return self_clone;
}

::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
  auto running_mean_clone = clone_arg(running_mean);
  auto running_var_clone = clone_arg(running_var);
  auto output = at::_ops::_native_batch_norm_legit::call(input, weight, bias, const_cast<Tensor&>(running_mean_clone), const_cast<Tensor&>(running_var_clone), training, momentum, eps);
  return ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>(std::get<0>(output), std::get<1>(output), std::get<2>(output), running_mean_clone, running_var_clone);
}

at::Tensor resize_as(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::resize_as_::call(self_clone, the_template, memory_format);
  return self_clone;
}

at::Tensor resize_as_sparse(const at::Tensor & self, const at::Tensor & the_template) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::resize_as_sparse_::call(self_clone, the_template);
  return self_clone;
}

at::Tensor zero(const at::Tensor & self) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::zero_::call(const_cast<Tensor&>(self_clone));
  return self_clone;
}

at::Tensor sparse_resize(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::sparse_resize_::call(self_clone, size, sparse_dim, dense_dim);
  return self_clone;
}

at::Tensor sparse_resize_and_clear(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::sparse_resize_and_clear_::call(self_clone, size, sparse_dim, dense_dim);
  return self_clone;
}

at::Tensor _coalesced(const at::Tensor & self, bool coalesced) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::_coalesced_::call(const_cast<Tensor&>(self_clone), coalesced);
  return self_clone;
}

at::Tensor copy_sparse_to_sparse(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::copy_sparse_to_sparse_::call(const_cast<Tensor&>(self_clone), src, non_blocking);
  return self_clone;
}

::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
  auto running_min_clone = clone_arg(running_min);
  auto running_max_clone = clone_arg(running_max);
  auto scale_clone = clone_arg(scale);
  auto zero_point_clone = clone_arg(zero_point);
  auto output = at::_ops::_fused_moving_avg_obs_fq_helper::call(self, observer_on, fake_quant_on, const_cast<Tensor&>(running_min_clone), const_cast<Tensor&>(running_max_clone), const_cast<Tensor&>(scale_clone), const_cast<Tensor&>(zero_point_clone), averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
  return ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>(std::get<0>(output), std::get<1>(output), running_min_clone, running_max_clone, scale_clone, zero_point_clone);
}

at::Tensor set(const at::Tensor & self, at::Storage source) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::set__source_Storage::call(const_cast<Tensor&>(self_clone), source);
  return self_clone;
}

at::Tensor set_symint(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::set__source_Storage_storage_offset::call(const_cast<Tensor&>(self_clone), source, storage_offset, size, stride);
  return self_clone;
}

at::Tensor set(const at::Tensor & self, const at::Tensor & source) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::set__source_Tensor::call(const_cast<Tensor&>(self_clone), source);
  return self_clone;
}

at::Tensor set(const at::Tensor & self) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::set_::call(const_cast<Tensor&>(self_clone));
  return self_clone;
}

at::Tensor random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::random__from::call(const_cast<Tensor&>(self_clone), from, to, generator);
  return self_clone;
}

at::Tensor random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::random__to::call(const_cast<Tensor&>(self_clone), to, generator);
  return self_clone;
}

at::Tensor random(const at::Tensor & self, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::random_::call(const_cast<Tensor&>(self_clone), generator);
  return self_clone;
}

at::Tensor uniform(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::uniform_::call(const_cast<Tensor&>(self_clone), from, to, generator);
  return self_clone;
}

at::Tensor cauchy(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::cauchy_::call(const_cast<Tensor&>(self_clone), median, sigma, generator);
  return self_clone;
}

at::Tensor log_normal(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::log_normal_::call(const_cast<Tensor&>(self_clone), mean, std, generator);
  return self_clone;
}

at::Tensor exponential(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::exponential_::call(const_cast<Tensor&>(self_clone), lambd, generator);
  return self_clone;
}

at::Tensor geometric(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  auto self_clone = clone_arg(self);
  auto output = at::_ops::geometric_::call(const_cast<Tensor&>(self_clone), p, generator);
  return self_clone;
}

::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
  auto self_clone = clone_arg(self);
  auto found_inf_clone = clone_arg(found_inf);
  at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self_clone, const_cast<Tensor&>(found_inf_clone), inv_scale);
  return ::std::tuple<::std::vector<at::Tensor>,at::Tensor>(self_clone, found_inf_clone);
}

::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
  auto self_clone = clone_arg(self);
  auto growth_tracker_clone = clone_arg(growth_tracker);
  auto output = at::_ops::_amp_update_scale_::call(const_cast<Tensor&>(self_clone), const_cast<Tensor&>(growth_tracker_clone), found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
  return ::std::tuple<at::Tensor,at::Tensor>(self_clone, growth_tracker_clone);
}

::std::vector<at::Tensor> _foreach_zero(at::TensorList self) {
  auto self_clone = clone_arg(self);
  at::_ops::_foreach_zero_::call(self_clone);
  return self_clone;
}

::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  auto self_clone = clone_arg(self);
  auto grads_clone = clone_arg(grads);
  auto exp_avgs_clone = clone_arg(exp_avgs);
  auto exp_avg_sqs_clone = clone_arg(exp_avg_sqs);
  auto max_exp_avg_sqs_clone = clone_arg(max_exp_avg_sqs);
  at::_ops::_fused_adam_::call(self_clone, grads_clone, exp_avgs_clone, exp_avg_sqs_clone, max_exp_avg_sqs_clone, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  return ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>>(self_clone, grads_clone, exp_avgs_clone, exp_avg_sqs_clone, max_exp_avg_sqs_clone);
}

::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  auto self_clone = clone_arg(self);
  auto grads_clone = clone_arg(grads);
  auto exp_avgs_clone = clone_arg(exp_avgs);
  auto exp_avg_sqs_clone = clone_arg(exp_avg_sqs);
  auto max_exp_avg_sqs_clone = clone_arg(max_exp_avg_sqs);
  at::_ops::_fused_adamw_::call(self_clone, grads_clone, exp_avgs_clone, exp_avg_sqs_clone, max_exp_avg_sqs_clone, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  return ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>>(self_clone, grads_clone, exp_avgs_clone, exp_avg_sqs_clone, max_exp_avg_sqs_clone);
}

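// out= wrappers: compute the result with the functional op, resize the
// caller-provided out tensor(s) to match, and copy the data in; dtype and
// device of each out tensor must already match the result (see copy_arg).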
2100 | at::Tensor & _new_zeros_with_same_feature_meta_out(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) { |
2101 | auto tmp_output = at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims); |
2102 | resize_out_helper(out, tmp_output); |
2103 | copy_arg(out, tmp_output); |
2104 | return out; |
2105 | } |
2106 | |
2107 | ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) { |
2108 | auto tmp_output = at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); |
2109 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2110 | copy_arg(out0, std::get<0>(tmp_output)); |
2111 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2112 | copy_arg(out1, std::get<1>(tmp_output)); |
2113 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2114 | } |
2115 | |
2116 | at::Tensor & _cudnn_rnn_flatten_weight_out_symint(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { |
2117 | auto tmp_output = at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); |
2118 | resize_out_helper(out, tmp_output); |
2119 | copy_arg(out, tmp_output); |
2120 | return out; |
2121 | } |
2122 | |
2123 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { |
2124 | auto tmp_output = at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); |
2125 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2126 | copy_arg(out0, std::get<0>(tmp_output)); |
2127 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2128 | copy_arg(out1, std::get<1>(tmp_output)); |
2129 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2130 | copy_arg(out2, std::get<2>(tmp_output)); |
2131 | resize_out_helper(out3, std::get<3>(tmp_output)); |
2132 | copy_arg(out3, std::get<3>(tmp_output)); |
2133 | resize_out_helper(out4, std::get<4>(tmp_output)); |
2134 | copy_arg(out4, std::get<4>(tmp_output)); |
2135 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4); |
2136 | } |
2137 | |
2138 | void _cudnn_rnn_backward_out_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { |
2139 | auto tmp_output = at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); |
2140 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2141 | copy_arg(out0, std::get<0>(tmp_output)); |
2142 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2143 | copy_arg(out1, std::get<1>(tmp_output)); |
2144 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2145 | copy_arg(out2, std::get<2>(tmp_output)); |
2146 | resize_out_helper(out3, std::get<3>(tmp_output)); |
2147 | copy_arg(out3, std::get<3>(tmp_output)); |
2148 | |
2149 | } |
2150 | |
2151 | at::Tensor & _cudnn_init_dropout_state_out(double dropout, bool train, int64_t dropout_seed, at::Tensor & out) { |
2152 | auto tmp_output = at::_ops::_cudnn_init_dropout_state::call(dropout, train, dropout_seed, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2153 | resize_out_helper(out, tmp_output); |
2154 | copy_arg(out, tmp_output); |
2155 | return out; |
2156 | } |
2157 | |
2158 | ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) { |
2159 | auto tmp_output = at::_ops::_fused_dropout::call(self, p, generator); |
2160 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2161 | copy_arg(out0, std::get<0>(tmp_output)); |
2162 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2163 | copy_arg(out1, std::get<1>(tmp_output)); |
2164 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2165 | } |
2166 | |
2167 | at::Tensor & _masked_scale_out(const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) { |
2168 | auto tmp_output = at::_ops::_masked_scale::call(self, mask, scale); |
2169 | resize_out_helper(out, tmp_output); |
2170 | copy_arg(out, tmp_output); |
2171 | return out; |
2172 | } |
2173 | |
2174 | ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out(const at::Tensor & input, double p, c10::optional<bool> train, at::Tensor & out0, at::Tensor & out1) { |
2175 | auto tmp_output = at::_ops::native_dropout::call(input, p, train); |
2176 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2177 | copy_arg(out0, std::get<0>(tmp_output)); |
2178 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2179 | copy_arg(out1, std::get<1>(tmp_output)); |
2180 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2181 | } |
2182 | |
2183 | at::Tensor & native_dropout_backward_out(const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) { |
2184 | auto tmp_output = at::_ops::native_dropout_backward::call(grad_output, mask, scale); |
2185 | resize_out_helper(out, tmp_output); |
2186 | copy_arg(out, tmp_output); |
2187 | return out; |
2188 | } |
2189 | |
2190 | at::Tensor & _conj_physical_out(const at::Tensor & self, at::Tensor & out) { |
2191 | auto tmp_output = at::_ops::_conj_physical::call(self); |
2192 | resize_out_helper(out, tmp_output); |
2193 | copy_arg(out, tmp_output); |
2194 | return out; |
2195 | } |
2196 | |
2197 | at::Tensor & _add_relu_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
2198 | auto tmp_output = at::_ops::_add_relu_Scalar::call(self, other, alpha); |
2199 | resize_out_helper(out, tmp_output); |
2200 | copy_arg(out, tmp_output); |
2201 | return out; |
2202 | } |
2203 | |
2204 | at::Tensor & add_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
2205 | auto tmp_output = at::_ops::add_Scalar::call(self, other, alpha); |
2206 | resize_out_helper(out, tmp_output); |
2207 | copy_arg(out, tmp_output); |
2208 | return out; |
2209 | } |
2210 | |
2211 | at::Tensor & affine_grid_generator_out(const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) { |
2212 | auto tmp_output = at::_ops::affine_grid_generator::call(theta, size, align_corners); |
2213 | resize_out_helper(out, tmp_output); |
2214 | copy_arg(out, tmp_output); |
2215 | return out; |
2216 | } |
2217 | |
2218 | at::Tensor & bartlett_window_out(int64_t window_length, at::Tensor & out) { |
2219 | auto tmp_output = at::_ops::bartlett_window::call(window_length, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2220 | resize_out_helper(out, tmp_output); |
2221 | copy_arg(out, tmp_output); |
2222 | return out; |
2223 | } |
2224 | |
2225 | at::Tensor & bartlett_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out) { |
2226 | auto tmp_output = at::_ops::bartlett_window_periodic::call(window_length, periodic, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2227 | resize_out_helper(out, tmp_output); |
2228 | copy_arg(out, tmp_output); |
2229 | return out; |
2230 | } |
2231 | |
2232 | at::Tensor & quantized_batch_norm_out(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) { |
2233 | auto tmp_output = at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point); |
2234 | resize_out_helper(out, tmp_output); |
2235 | copy_arg(out, tmp_output); |
2236 | return out; |
2237 | } |
2238 | |
2239 | at::Tensor & bernoulli_Tensor_out(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator, at::Tensor & out) { |
2240 | auto tmp_output = at::_ops::bernoulli_Tensor::call(self, p, generator); |
2241 | resize_out_helper(out, tmp_output); |
2242 | copy_arg(out, tmp_output); |
2243 | return out; |
2244 | } |
2245 | |
2246 | at::Tensor & bernoulli_float_out(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) { |
2247 | auto tmp_output = at::_ops::bernoulli_p::call(self, p, generator); |
2248 | resize_out_helper(out, tmp_output); |
2249 | copy_arg(out, tmp_output); |
2250 | return out; |
2251 | } |
2252 | |
2253 | at::Tensor & binary_cross_entropy_with_logits_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) { |
2254 | auto tmp_output = at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction); |
2255 | resize_out_helper(out, tmp_output); |
2256 | copy_arg(out, tmp_output); |
2257 | return out; |
2258 | } |
2259 | |
2260 | at::Tensor & bincount_out(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) { |
2261 | auto tmp_output = at::_ops::bincount::call(self, weights, minlength); |
2262 | resize_out_helper(out, tmp_output); |
2263 | copy_arg(out, tmp_output); |
2264 | return out; |
2265 | } |
2266 | |
2267 | at::Tensor & blackman_window_out(int64_t window_length, at::Tensor & out) { |
2268 | auto tmp_output = at::_ops::blackman_window::call(window_length, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2269 | resize_out_helper(out, tmp_output); |
2270 | copy_arg(out, tmp_output); |
2271 | return out; |
2272 | } |
2273 | |
2274 | at::Tensor & blackman_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out) { |
2275 | auto tmp_output = at::_ops::blackman_window_periodic::call(window_length, periodic, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2276 | resize_out_helper(out, tmp_output); |
2277 | copy_arg(out, tmp_output); |
2278 | return out; |
2279 | } |
2280 | |
2281 | at::Tensor & block_diag_out(at::TensorList tensors, at::Tensor & out) { |
2282 | auto tmp_output = at::_ops::block_diag::call(tensors); |
2283 | resize_out_helper(out, tmp_output); |
2284 | copy_arg(out, tmp_output); |
2285 | return out; |
2286 | } |
2287 | |
2288 | at::Tensor & constant_pad_nd_out_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) { |
2289 | auto tmp_output = at::_ops::constant_pad_nd::call(self, pad, value); |
2290 | resize_out_helper(out, tmp_output); |
2291 | copy_arg(out, tmp_output); |
2292 | return out; |
2293 | } |
2294 | |
2295 | at::Tensor & convolution_out_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor & out) { |
2296 | auto tmp_output = at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); |
2297 | resize_out_helper(out, tmp_output); |
2298 | copy_arg(out, tmp_output); |
2299 | return out; |
2300 | } |
2301 | |
2302 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out_symint(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2303 | auto tmp_output = at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask); |
2304 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2305 | copy_arg(out0, std::get<0>(tmp_output)); |
2306 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2307 | copy_arg(out1, std::get<1>(tmp_output)); |
2308 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2309 | copy_arg(out2, std::get<2>(tmp_output)); |
2310 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2311 | } |
2312 | |
2313 | at::Tensor & convolution_overrideable_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) { |
2314 | auto tmp_output = at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); |
2315 | resize_out_helper(out, tmp_output); |
2316 | copy_arg(out, tmp_output); |
2317 | return out; |
2318 | } |
2319 | |
2320 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2321 | auto tmp_output = at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask); |
2322 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2323 | copy_arg(out0, std::get<0>(tmp_output)); |
2324 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2325 | copy_arg(out1, std::get<1>(tmp_output)); |
2326 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2327 | copy_arg(out2, std::get<2>(tmp_output)); |
2328 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2329 | } |
2330 | |
2331 | at::Tensor & _convolution_out_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) { |
2332 | auto tmp_output = at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32); |
2333 | resize_out_helper(out, tmp_output); |
2334 | copy_arg(out, tmp_output); |
2335 | return out; |
2336 | } |
2337 | |
2338 | at::Tensor & conv_tbc_out(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) { |
2339 | auto tmp_output = at::_ops::conv_tbc::call(self, weight, bias, pad); |
2340 | resize_out_helper(out, tmp_output); |
2341 | copy_arg(out, tmp_output); |
2342 | return out; |
2343 | } |
2344 | |
2345 | at::Tensor & copy_out(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) { |
2346 | auto tmp_output = at::_ops::copy::call(self, src, non_blocking); |
2347 | resize_out_helper(out, tmp_output); |
2348 | copy_arg(out, tmp_output); |
2349 | return out; |
2350 | } |
2351 | |
2352 | at::Tensor & _copy_from_out(const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) { |
2353 | auto tmp_output = at::_ops::_copy_from::call(self, dst, non_blocking); |
2354 | resize_out_helper(out, tmp_output); |
2355 | copy_arg(out, tmp_output); |
2356 | return out; |
2357 | } |
2358 | |
2359 | at::Tensor & _copy_from_and_resize_out(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) { |
2360 | auto tmp_output = at::_ops::_copy_from_and_resize::call(self, dst); |
2361 | resize_out_helper(out, tmp_output); |
2362 | copy_arg(out, tmp_output); |
2363 | return out; |
2364 | } |
2365 | |
2366 | at::Tensor & count_nonzero_dim_IntList_out(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { |
2367 | auto tmp_output = at::_ops::count_nonzero_dim_IntList::call(self, dim); |
2368 | resize_out_helper(out, tmp_output); |
2369 | copy_arg(out, tmp_output); |
2370 | return out; |
2371 | } |
2372 | |
2373 | at::Tensor & count_nonzero_out(const at::Tensor & self, c10::optional<int64_t> dim, at::Tensor & out) { |
2374 | auto tmp_output = at::_ops::count_nonzero::call(self, dim); |
2375 | resize_out_helper(out, tmp_output); |
2376 | copy_arg(out, tmp_output); |
2377 | return out; |
2378 | } |
2379 | |
2380 | at::Tensor & cudnn_affine_grid_generator_out(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) { |
2381 | auto tmp_output = at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W); |
2382 | resize_out_helper(out, tmp_output); |
2383 | copy_arg(out, tmp_output); |
2384 | return out; |
2385 | } |
2386 | |
2387 | at::Tensor & cudnn_affine_grid_generator_backward_out(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) { |
2388 | auto tmp_output = at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W); |
2389 | resize_out_helper(out, tmp_output); |
2390 | copy_arg(out, tmp_output); |
2391 | return out; |
2392 | } |
2393 | |
2394 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
2395 | auto tmp_output = at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); |
2396 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2397 | copy_arg(out0, std::get<0>(tmp_output)); |
2398 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2399 | copy_arg(out1, std::get<1>(tmp_output)); |
2400 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2401 | copy_arg(out2, std::get<2>(tmp_output)); |
2402 | resize_out_helper(out3, std::get<3>(tmp_output)); |
2403 | copy_arg(out3, std::get<3>(tmp_output)); |
2404 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
2405 | } |
2406 | |
2407 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2408 | auto tmp_output = at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace); |
2409 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2410 | copy_arg(out0, std::get<0>(tmp_output)); |
2411 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2412 | copy_arg(out1, std::get<1>(tmp_output)); |
2413 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2414 | copy_arg(out2, std::get<2>(tmp_output)); |
2415 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2416 | } |
2417 | |
2418 | at::Tensor & cudnn_convolution_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) { |
2419 | auto tmp_output = at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); |
2420 | resize_out_helper(out, tmp_output); |
2421 | copy_arg(out, tmp_output); |
2422 | return out; |
2423 | } |
2424 | |
2425 | at::Tensor & cudnn_convolution_transpose_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) { |
2426 | auto tmp_output = at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); |
2427 | resize_out_helper(out, tmp_output); |
2428 | copy_arg(out, tmp_output); |
2429 | return out; |
2430 | } |
2431 | |
2432 | at::Tensor & _mps_convolution_transpose_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
2433 | auto tmp_output = at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups); |
2434 | resize_out_helper(out, tmp_output); |
2435 | copy_arg(out, tmp_output); |
2436 | return out; |
2437 | } |
2438 | |
2439 | ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { |
2440 | auto tmp_output = at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); |
2441 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2442 | copy_arg(out0, std::get<0>(tmp_output)); |
2443 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2444 | copy_arg(out1, std::get<1>(tmp_output)); |
2445 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2446 | } |
2447 | |
2448 | at::Tensor & cudnn_convolution_relu_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
2449 | auto tmp_output = at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups); |
2450 | resize_out_helper(out, tmp_output); |
2451 | copy_arg(out, tmp_output); |
2452 | return out; |
2453 | } |
2454 | |
2455 | at::Tensor & cudnn_convolution_add_relu_out(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
2456 | auto tmp_output = at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups); |
2457 | resize_out_helper(out, tmp_output); |
2458 | copy_arg(out, tmp_output); |
2459 | return out; |
2460 | } |
2461 | |
2462 | at::Tensor & cudnn_grid_sampler_out(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) { |
2463 | auto tmp_output = at::_ops::cudnn_grid_sampler::call(self, grid); |
2464 | resize_out_helper(out, tmp_output); |
2465 | copy_arg(out, tmp_output); |
2466 | return out; |
2467 | } |
2468 | |
2469 | ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) { |
2470 | auto tmp_output = at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output); |
2471 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2472 | copy_arg(out0, std::get<0>(tmp_output)); |
2473 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2474 | copy_arg(out1, std::get<1>(tmp_output)); |
2475 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2476 | } |
2477 | |
2478 | ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) { |
2479 | auto tmp_output = at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); |
2480 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2481 | copy_arg(out0, std::get<0>(tmp_output)); |
2482 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2483 | copy_arg(out1, std::get<1>(tmp_output)); |
2484 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2485 | } |
2486 | |
2487 | ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_Tensor_out(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) { |
2488 | auto tmp_output = at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); |
2489 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2490 | copy_arg(out0, std::get<0>(tmp_output)); |
2491 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2492 | copy_arg(out1, std::get<1>(tmp_output)); |
2493 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2494 | } |
2495 | |
2496 | at::Tensor & _ctc_loss_backward_out(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) { |
2497 | auto tmp_output = at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); |
2498 | resize_out_helper(out, tmp_output); |
2499 | copy_arg(out, tmp_output); |
2500 | return out; |
2501 | } |
2502 | |
2503 | at::Tensor & diag_embed_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { |
2504 | auto tmp_output = at::_ops::diag_embed::call(self, offset, dim1, dim2); |
2505 | resize_out_helper(out, tmp_output); |
2506 | copy_arg(out, tmp_output); |
2507 | return out; |
2508 | } |
2509 | |
2510 | at::Tensor & diagonal_backward_out_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { |
2511 | auto tmp_output = at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2); |
2512 | resize_out_helper(out, tmp_output); |
2513 | copy_arg(out, tmp_output); |
2514 | return out; |
2515 | } |
2516 | |
2517 | at::Tensor & div_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
2518 | auto tmp_output = at::_ops::div_Scalar::call(self, other); |
2519 | resize_out_helper(out, tmp_output); |
2520 | copy_arg(out, tmp_output); |
2521 | return out; |
2522 | } |
2523 | |
2524 | at::Tensor & div_Scalar_mode_out(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) { |
2525 | auto tmp_output = at::_ops::div_Scalar_mode::call(self, other, rounding_mode); |
2526 | resize_out_helper(out, tmp_output); |
2527 | copy_arg(out, tmp_output); |
2528 | return out; |
2529 | } |
2530 | |
2531 | at::Tensor & embedding_out_symint(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) { |
2532 | auto tmp_output = at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse); |
2533 | resize_out_helper(out, tmp_output); |
2534 | copy_arg(out, tmp_output); |
2535 | return out; |
2536 | } |
2537 | |
2538 | at::Tensor & embedding_dense_backward_out_symint(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) { |
2539 | auto tmp_output = at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); |
2540 | resize_out_helper(out, tmp_output); |
2541 | copy_arg(out, tmp_output); |
2542 | return out; |
2543 | } |
2544 | |
2545 | at::Tensor & embedding_renorm_out(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) { |
2546 | auto tmp_output = at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type); |
2547 | resize_out_helper(out, tmp_output); |
2548 | copy_arg(out, tmp_output); |
2549 | return out; |
2550 | } |
2551 | |
2552 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
2553 | auto tmp_output = at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); |
2554 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2555 | copy_arg(out0, std::get<0>(tmp_output)); |
2556 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2557 | copy_arg(out1, std::get<1>(tmp_output)); |
2558 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2559 | copy_arg(out2, std::get<2>(tmp_output)); |
2560 | resize_out_helper(out3, std::get<3>(tmp_output)); |
2561 | copy_arg(out3, std::get<3>(tmp_output)); |
2562 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
2563 | } |
2564 | |
2565 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
2566 | auto tmp_output = at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); |
2567 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2568 | copy_arg(out0, std::get<0>(tmp_output)); |
2569 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2570 | copy_arg(out1, std::get<1>(tmp_output)); |
2571 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2572 | copy_arg(out2, std::get<2>(tmp_output)); |
2573 | resize_out_helper(out3, std::get<3>(tmp_output)); |
2574 | copy_arg(out3, std::get<3>(tmp_output)); |
2575 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
2576 | } |
2577 | |
2578 | at::Tensor & _embedding_bag_dense_backward_out_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) { |
2579 | auto tmp_output = at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); |
2580 | resize_out_helper(out, tmp_output); |
2581 | copy_arg(out, tmp_output); |
2582 | return out; |
2583 | } |
2584 | |
2585 | at::Tensor & _embedding_bag_per_sample_weights_backward_out(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) { |
2586 | auto tmp_output = at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx); |
2587 | resize_out_helper(out, tmp_output); |
2588 | copy_arg(out, tmp_output); |
2589 | return out; |
2590 | } |
2591 | |
2592 | at::Tensor & empty_names_out(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
2593 | auto tmp_output = at::_ops::empty_names::call(size, names, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
2594 | resize_out_helper(out, tmp_output); |
2595 | copy_arg(out, tmp_output); |
2596 | return out; |
2597 | } |
2598 | |
2599 | at::Tensor & new_empty_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
2600 | auto tmp_output = at::_ops::new_empty::call(self, size, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2601 | resize_out_helper(out, tmp_output); |
2602 | copy_arg(out, tmp_output); |
2603 | return out; |
2604 | } |
2605 | |
2606 | at::Tensor & new_empty_strided_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { |
2607 | auto tmp_output = at::_ops::new_empty_strided::call(self, size, stride, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2608 | resize_out_helper(out, tmp_output); |
2609 | copy_arg(out, tmp_output); |
2610 | return out; |
2611 | } |
2612 | |
2613 | at::Tensor & new_full_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { |
2614 | auto tmp_output = at::_ops::new_full::call(self, size, fill_value, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2615 | resize_out_helper(out, tmp_output); |
2616 | copy_arg(out, tmp_output); |
2617 | return out; |
2618 | } |
2619 | |
2620 | at::Tensor & new_zeros_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
2621 | auto tmp_output = at::_ops::new_zeros::call(self, size, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2622 | resize_out_helper(out, tmp_output); |
2623 | copy_arg(out, tmp_output); |
2624 | return out; |
2625 | } |
2626 | |
2627 | at::Tensor & new_ones_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
2628 | auto tmp_output = at::_ops::new_ones::call(self, size, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2629 | resize_out_helper(out, tmp_output); |
2630 | copy_arg(out, tmp_output); |
2631 | return out; |
2632 | } |
2633 | |
2634 | at::Tensor & _empty_affine_quantized_out(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
2635 | auto tmp_output = at::_ops::_empty_affine_quantized::call(size, out.scalar_type(), out.layout(), out.device(), c10::nullopt, scale, zero_point, memory_format); |
2636 | resize_out_helper(out, tmp_output); |
2637 | copy_arg(out, tmp_output); |
2638 | return out; |
2639 | } |
2640 | |
2641 | at::Tensor & _empty_per_channel_affine_quantized_out(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
2642 | auto tmp_output = at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
2643 | resize_out_helper(out, tmp_output); |
2644 | copy_arg(out, tmp_output); |
2645 | return out; |
2646 | } |
2647 | |
2648 | const at::Tensor & resize_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) { |
2649 | auto tmp_output = at::_ops::resize::call(self, size, memory_format); |
2650 | resize_out_helper(out, tmp_output); |
2651 | copy_arg(out, tmp_output); |
2652 | return out; |
2653 | } |
2654 | |
2655 | const at::Tensor & _resize_output_out(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) { |
2656 | auto tmp_output = at::_ops::_resize_output::call(self, size, device); |
2657 | resize_out_helper(out, tmp_output); |
2658 | copy_arg(out, tmp_output); |
2659 | return out; |
2660 | } |
2661 | |
2662 | at::Tensor & empty_quantized_out(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
2663 | auto tmp_output = at::_ops::empty_quantized::call(size, qtensor, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
2664 | resize_out_helper(out, tmp_output); |
2665 | copy_arg(out, tmp_output); |
2666 | return out; |
2667 | } |
2668 | |
2669 | at::Tensor & empty_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
2670 | auto tmp_output = at::_ops::empty_like::call(self, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
2671 | resize_out_helper(out, tmp_output); |
2672 | copy_arg(out, tmp_output); |
2673 | return out; |
2674 | } |
2675 | |
2676 | at::Tensor & empty_strided_out_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { |
2677 | auto tmp_output = at::_ops::empty_strided::call(size, stride, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2678 | resize_out_helper(out, tmp_output); |
2679 | copy_arg(out, tmp_output); |
2680 | return out; |
2681 | } |
2682 | |
2683 | at::Tensor & fill_Scalar_out(const at::Tensor & self, const at::Scalar & value, at::Tensor & out) { |
2684 | auto tmp_output = at::_ops::fill_Scalar::call(self, value); |
2685 | resize_out_helper(out, tmp_output); |
2686 | copy_arg(out, tmp_output); |
2687 | return out; |
2688 | } |
2689 | |
2690 | at::Tensor & fill_Tensor_out(const at::Tensor & self, const at::Tensor & value, at::Tensor & out) { |
2691 | auto tmp_output = at::_ops::fill_Tensor::call(self, value); |
2692 | resize_out_helper(out, tmp_output); |
2693 | copy_arg(out, tmp_output); |
2694 | return out; |
2695 | } |
2696 | |
2697 | at::Tensor & full_names_out(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) { |
2698 | auto tmp_output = at::_ops::full_names::call(size, fill_value, names, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2699 | resize_out_helper(out, tmp_output); |
2700 | copy_arg(out, tmp_output); |
2701 | return out; |
2702 | } |
2703 | |
2704 | at::Tensor & full_like_out(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
2705 | auto tmp_output = at::_ops::full_like::call(self, fill_value, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
2706 | resize_out_helper(out, tmp_output); |
2707 | copy_arg(out, tmp_output); |
2708 | return out; |
2709 | } |
2710 | |
2711 | at::Tensor & from_file_out(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out) { |
2712 | auto tmp_output = at::_ops::from_file::call(filename, shared, size, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2713 | resize_out_helper(out, tmp_output); |
2714 | copy_arg(out, tmp_output); |
2715 | return out; |
2716 | } |
2717 | |
2718 | at::Tensor & grid_sampler_2d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { |
2719 | auto tmp_output = at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners); |
2720 | resize_out_helper(out, tmp_output); |
2721 | copy_arg(out, tmp_output); |
2722 | return out; |
2723 | } |
2724 | |
2725 | ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { |
2726 | auto tmp_output = at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); |
2727 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2728 | copy_arg(out0, std::get<0>(tmp_output)); |
2729 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2730 | copy_arg(out1, std::get<1>(tmp_output)); |
2731 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2732 | } |
2733 | |
2734 | at::Tensor & _grid_sampler_2d_cpu_fallback_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { |
2735 | auto tmp_output = at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners); |
2736 | resize_out_helper(out, tmp_output); |
2737 | copy_arg(out, tmp_output); |
2738 | return out; |
2739 | } |
2740 | |
2741 | at::Tensor & grid_sampler_3d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { |
2742 | auto tmp_output = at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners); |
2743 | resize_out_helper(out, tmp_output); |
2744 | copy_arg(out, tmp_output); |
2745 | return out; |
2746 | } |
2747 | |
2748 | ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { |
2749 | auto tmp_output = at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); |
2750 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2751 | copy_arg(out0, std::get<0>(tmp_output)); |
2752 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2753 | copy_arg(out1, std::get<1>(tmp_output)); |
2754 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2755 | } |
2756 | |
2757 | at::Tensor & hann_window_out(int64_t window_length, at::Tensor & out) { |
2758 | auto tmp_output = at::_ops::hann_window::call(window_length, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2759 | resize_out_helper(out, tmp_output); |
2760 | copy_arg(out, tmp_output); |
2761 | return out; |
2762 | } |
2763 | |
2764 | at::Tensor & hann_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out) { |
2765 | auto tmp_output = at::_ops::hann_window_periodic::call(window_length, periodic, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2766 | resize_out_helper(out, tmp_output); |
2767 | copy_arg(out, tmp_output); |
2768 | return out; |
2769 | } |
2770 | |
2771 | at::Tensor & hamming_window_out(int64_t window_length, at::Tensor & out) { |
2772 | auto tmp_output = at::_ops::hamming_window::call(window_length, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2773 | resize_out_helper(out, tmp_output); |
2774 | copy_arg(out, tmp_output); |
2775 | return out; |
2776 | } |
2777 | |
2778 | at::Tensor & hamming_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out) { |
2779 | auto tmp_output = at::_ops::hamming_window_periodic::call(window_length, periodic, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2780 | resize_out_helper(out, tmp_output); |
2781 | copy_arg(out, tmp_output); |
2782 | return out; |
2783 | } |
2784 | |
2785 | at::Tensor & hamming_window_periodic_alpha_out(int64_t window_length, bool periodic, double alpha, at::Tensor & out) { |
2786 | auto tmp_output = at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2787 | resize_out_helper(out, tmp_output); |
2788 | copy_arg(out, tmp_output); |
2789 | return out; |
2790 | } |
2791 | |
2792 | at::Tensor & hamming_window_periodic_alpha_beta_out(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) { |
2793 | auto tmp_output = at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2794 | resize_out_helper(out, tmp_output); |
2795 | copy_arg(out, tmp_output); |
2796 | return out; |
2797 | } |
2798 | |
2799 | at::Tensor & kaiser_window_out(int64_t window_length, at::Tensor & out) { |
2800 | auto tmp_output = at::_ops::kaiser_window::call(window_length, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2801 | resize_out_helper(out, tmp_output); |
2802 | copy_arg(out, tmp_output); |
2803 | return out; |
2804 | } |
2805 | |
2806 | at::Tensor & kaiser_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out) { |
2807 | auto tmp_output = at::_ops::kaiser_window_periodic::call(window_length, periodic, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2808 | resize_out_helper(out, tmp_output); |
2809 | copy_arg(out, tmp_output); |
2810 | return out; |
2811 | } |
2812 | |
2813 | at::Tensor & kaiser_window_beta_out(int64_t window_length, bool periodic, double beta, at::Tensor & out) { |
2814 | auto tmp_output = at::_ops::kaiser_window_beta::call(window_length, periodic, beta, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
2815 | resize_out_helper(out, tmp_output); |
2816 | copy_arg(out, tmp_output); |
2817 | return out; |
2818 | } |
2819 | |
2820 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out_symint(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2821 | auto tmp_output = at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps); |
2822 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2823 | copy_arg(out0, std::get<0>(tmp_output)); |
2824 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2825 | copy_arg(out1, std::get<1>(tmp_output)); |
2826 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2827 | copy_arg(out2, std::get<2>(tmp_output)); |
2828 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2829 | } |
2830 | |
2831 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out_symint(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2832 | auto tmp_output = at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask); |
2833 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2834 | copy_arg(out0, std::get<0>(tmp_output)); |
2835 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2836 | copy_arg(out1, std::get<1>(tmp_output)); |
2837 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2838 | copy_arg(out2, std::get<2>(tmp_output)); |
2839 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2840 | } |
2841 | |
2842 | at::Tensor & index_put_out(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) { |
2843 | auto tmp_output = at::_ops::index_put::call(self, indices, values, accumulate); |
2844 | resize_out_helper(out, tmp_output); |
2845 | copy_arg(out, tmp_output); |
2846 | return out; |
2847 | } |
2848 | |
2849 | at::Tensor & _index_put_impl_out(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) { |
2850 | auto tmp_output = at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe); |
2851 | resize_out_helper(out, tmp_output); |
2852 | copy_arg(out, tmp_output); |
2853 | return out; |
2854 | } |
2855 | |
2856 | at::Tensor & isnan_out(const at::Tensor & self, at::Tensor & out) { |
2857 | auto tmp_output = at::_ops::isnan::call(self); |
2858 | resize_out_helper(out, tmp_output); |
2859 | copy_arg(out, tmp_output); |
2860 | return out; |
2861 | } |
2862 | |
2863 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2864 | auto tmp_output = at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps); |
2865 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2866 | copy_arg(out0, std::get<0>(tmp_output)); |
2867 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2868 | copy_arg(out1, std::get<1>(tmp_output)); |
2869 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2870 | copy_arg(out2, std::get<2>(tmp_output)); |
2871 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2872 | } |
2873 | |
2874 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2875 | auto tmp_output = at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask); |
2876 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2877 | copy_arg(out0, std::get<0>(tmp_output)); |
2878 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2879 | copy_arg(out1, std::get<1>(tmp_output)); |
2880 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2881 | copy_arg(out2, std::get<2>(tmp_output)); |
2882 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2883 | } |
2884 | |
2885 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2886 | auto tmp_output = at::_ops::linear_backward::call(self, grad_output, weight, output_mask); |
2887 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2888 | copy_arg(out0, std::get<0>(tmp_output)); |
2889 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2890 | copy_arg(out1, std::get<1>(tmp_output)); |
2891 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2892 | copy_arg(out2, std::get<2>(tmp_output)); |
2893 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2894 | } |
2895 | |
2896 | at::Tensor & mkldnn_linear_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) { |
2897 | auto tmp_output = at::_ops::mkldnn_linear::call(self, weight, bias); |
2898 | resize_out_helper(out, tmp_output); |
2899 | copy_arg(out, tmp_output); |
2900 | return out; |
2901 | } |
2902 | |
2903 | at::Tensor & mkldnn_linear_backward_input_out(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) { |
2904 | auto tmp_output = at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight); |
2905 | resize_out_helper(out, tmp_output); |
2906 | copy_arg(out, tmp_output); |
2907 | return out; |
2908 | } |
2909 | |
2910 | ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) { |
2911 | auto tmp_output = at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined); |
2912 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2913 | copy_arg(out0, std::get<0>(tmp_output)); |
2914 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2915 | copy_arg(out1, std::get<1>(tmp_output)); |
2916 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2917 | } |
2918 | |
2919 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
2920 | auto tmp_output = at::_ops::mkldnn_linear_backward::call(self, grad_output, weight, output_mask); |
2921 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2922 | copy_arg(out0, std::get<0>(tmp_output)); |
2923 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2924 | copy_arg(out1, std::get<1>(tmp_output)); |
2925 | resize_out_helper(out2, std::get<2>(tmp_output)); |
2926 | copy_arg(out2, std::get<2>(tmp_output)); |
2927 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
2928 | } |
2929 | |
2930 | ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) { |
2931 | auto tmp_output = at::_ops::matmul_backward::call(grad, self, other, mask); |
2932 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2933 | copy_arg(out0, std::get<0>(tmp_output)); |
2934 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2935 | copy_arg(out1, std::get<1>(tmp_output)); |
2936 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2937 | } |
2938 | |
2939 | ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) { |
2940 | auto tmp_output = at::_ops::_aminmax::call(self); |
2941 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2942 | copy_arg(out0, std::get<0>(tmp_output)); |
2943 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2944 | copy_arg(out1, std::get<1>(tmp_output)); |
2945 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2946 | } |
2947 | |
2948 | ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_dim_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) { |
2949 | auto tmp_output = at::_ops::_aminmax_dim::call(self, dim, keepdim); |
2950 | resize_out_helper(out0, std::get<0>(tmp_output)); |
2951 | copy_arg(out0, std::get<0>(tmp_output)); |
2952 | resize_out_helper(out1, std::get<1>(tmp_output)); |
2953 | copy_arg(out1, std::get<1>(tmp_output)); |
2954 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
2955 | } |
2956 | |
2957 | at::Tensor & _mps_max_pool2d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
2958 | auto tmp_output = at::_ops::_mps_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); |
2959 | resize_out_helper(out, tmp_output); |
2960 | copy_arg(out, tmp_output); |
2961 | return out; |
2962 | } |
2963 | |
2964 | at::Tensor & mps_max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
2965 | auto tmp_output = at::_ops::mps_max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode); |
2966 | resize_out_helper(out, tmp_output); |
2967 | copy_arg(out, tmp_output); |
2968 | return out; |
2969 | } |
2970 | |
2971 | at::Tensor & mkldnn_max_pool2d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
2972 | auto tmp_output = at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); |
2973 | resize_out_helper(out, tmp_output); |
2974 | copy_arg(out, tmp_output); |
2975 | return out; |
2976 | } |
2977 | |
2978 | at::Tensor & mkldnn_max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
2979 | auto tmp_output = at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode); |
2980 | resize_out_helper(out, tmp_output); |
2981 | copy_arg(out, tmp_output); |
2982 | return out; |
2983 | } |
2984 | |
2985 | at::Tensor & mkldnn_max_pool3d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
2986 | auto tmp_output = at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); |
2987 | resize_out_helper(out, tmp_output); |
2988 | copy_arg(out, tmp_output); |
2989 | return out; |
2990 | } |
2991 | |
2992 | at::Tensor & mkldnn_max_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
2993 | auto tmp_output = at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode); |
2994 | resize_out_helper(out, tmp_output); |
2995 | copy_arg(out, tmp_output); |
2996 | return out; |
2997 | } |
2998 | |
2999 | at::Tensor & quantized_max_pool1d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
3000 | auto tmp_output = at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode); |
3001 | resize_out_helper(out, tmp_output); |
3002 | copy_arg(out, tmp_output); |
3003 | return out; |
3004 | } |
3005 | |
3006 | at::Tensor & quantized_max_pool2d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
3007 | auto tmp_output = at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); |
3008 | resize_out_helper(out, tmp_output); |
3009 | copy_arg(out, tmp_output); |
3010 | return out; |
3011 | } |
3012 | |
3013 | at::Tensor & median_out(const at::Tensor & self, at::Tensor & out) { |
3014 | auto tmp_output = at::_ops::median::call(self); |
3015 | resize_out_helper(out, tmp_output); |
3016 | copy_arg(out, tmp_output); |
3017 | return out; |
3018 | } |
3019 | |
3020 | at::Tensor & nanmedian_out(const at::Tensor & self, at::Tensor & out) { |
3021 | auto tmp_output = at::_ops::nanmedian::call(self); |
3022 | resize_out_helper(out, tmp_output); |
3023 | copy_arg(out, tmp_output); |
3024 | return out; |
3025 | } |
3026 | |
3027 | at::Tensor & _mps_convolution_out(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
3028 | auto tmp_output = at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups); |
3029 | resize_out_helper(out, tmp_output); |
3030 | copy_arg(out, tmp_output); |
3031 | return out; |
3032 | } |
3033 | |
3034 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3035 | auto tmp_output = at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask); |
3036 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3037 | copy_arg(out0, std::get<0>(tmp_output)); |
3038 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3039 | copy_arg(out1, std::get<1>(tmp_output)); |
3040 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3041 | copy_arg(out2, std::get<2>(tmp_output)); |
3042 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3043 | } |
3044 | |
3045 | at::Tensor & mkldnn_convolution_out_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
3046 | auto tmp_output = at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups); |
3047 | resize_out_helper(out, tmp_output); |
3048 | copy_arg(out, tmp_output); |
3049 | return out; |
3050 | } |
3051 | |
3052 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
3053 | auto tmp_output = at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train); |
3054 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3055 | copy_arg(out0, std::get<0>(tmp_output)); |
3056 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3057 | copy_arg(out1, std::get<1>(tmp_output)); |
3058 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3059 | copy_arg(out2, std::get<2>(tmp_output)); |
3060 | resize_out_helper(out3, std::get<3>(tmp_output)); |
3061 | copy_arg(out3, std::get<3>(tmp_output)); |
3062 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
3063 | } |
3064 | |
3065 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) { |
3066 | auto tmp_output = at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace); |
3067 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3068 | copy_arg(out0, std::get<0>(tmp_output)); |
3069 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3070 | copy_arg(out1, std::get<1>(tmp_output)); |
3071 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3072 | copy_arg(out2, std::get<2>(tmp_output)); |
3073 | resize_out_helper(out3, std::get<3>(tmp_output)); |
3074 | copy_arg(out3, std::get<3>(tmp_output)); |
3075 | resize_out_helper(out4, std::get<4>(tmp_output)); |
3076 | copy_arg(out4, std::get<4>(tmp_output)); |
3077 | resize_out_helper(out5, std::get<5>(tmp_output)); |
3078 | copy_arg(out5, std::get<5>(tmp_output)); |
3079 | resize_out_helper(out6, std::get<6>(tmp_output)); |
3080 | copy_arg(out6, std::get<6>(tmp_output)); |
3081 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4, out5, out6); |
3082 | } |
3083 | |
3084 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3085 | auto tmp_output = at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); |
3086 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3087 | copy_arg(out0, std::get<0>(tmp_output)); |
3088 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3089 | copy_arg(out1, std::get<1>(tmp_output)); |
3090 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3091 | copy_arg(out2, std::get<2>(tmp_output)); |
3092 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3093 | } |
3094 | |
3095 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3096 | auto tmp_output = at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon); |
3097 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3098 | copy_arg(out0, std::get<0>(tmp_output)); |
3099 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3100 | copy_arg(out1, std::get<1>(tmp_output)); |
3101 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3102 | copy_arg(out2, std::get<2>(tmp_output)); |
3103 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3104 | } |
3105 | |
3106 | at::Tensor & miopen_convolution_out_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { |
3107 | auto tmp_output = at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); |
3108 | resize_out_helper(out, tmp_output); |
3109 | copy_arg(out, tmp_output); |
3110 | return out; |
3111 | } |
3112 | |
3113 | at::Tensor & miopen_convolution_transpose_out_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { |
3114 | auto tmp_output = at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); |
3115 | resize_out_helper(out, tmp_output); |
3116 | copy_arg(out, tmp_output); |
3117 | return out; |
3118 | } |
3119 | |
3120 | at::Tensor & miopen_depthwise_convolution_out_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { |
3121 | auto tmp_output = at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); |
3122 | resize_out_helper(out, tmp_output); |
3123 | copy_arg(out, tmp_output); |
3124 | return out; |
3125 | } |
3126 | |
3127 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { |
3128 | auto tmp_output = at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); |
3129 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3130 | copy_arg(out0, std::get<0>(tmp_output)); |
3131 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3132 | copy_arg(out1, std::get<1>(tmp_output)); |
3133 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3134 | copy_arg(out2, std::get<2>(tmp_output)); |
3135 | resize_out_helper(out3, std::get<3>(tmp_output)); |
3136 | copy_arg(out3, std::get<3>(tmp_output)); |
3137 | resize_out_helper(out4, std::get<4>(tmp_output)); |
3138 | copy_arg(out4, std::get<4>(tmp_output)); |
3139 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4); |
3140 | } |
3141 | |
3142 | void miopen_rnn_backward_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { |
3143 | auto tmp_output = at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); |
3144 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3145 | copy_arg(out0, std::get<0>(tmp_output)); |
3146 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3147 | copy_arg(out1, std::get<1>(tmp_output)); |
3148 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3149 | copy_arg(out2, std::get<2>(tmp_output)); |
3150 | resize_out_helper(out3, std::get<3>(tmp_output)); |
3151 | copy_arg(out3, std::get<3>(tmp_output)); |
3152 | |
3153 | } |
3154 | |
3155 | at::Tensor & _sparse_sparse_matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
3156 | auto tmp_output = at::_ops::_sparse_sparse_matmul::call(self, other); |
3157 | resize_out_helper(out, tmp_output); |
3158 | copy_arg(out, tmp_output); |
3159 | return out; |
3160 | } |
3161 | |
3162 | at::Tensor & mul_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
3163 | auto tmp_output = at::_ops::mul_Scalar::call(self, other); |
3164 | resize_out_helper(out, tmp_output); |
3165 | copy_arg(out, tmp_output); |
3166 | return out; |
3167 | } |
3168 | |
3169 | ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) { |
3170 | auto tmp_output = at::_ops::batch_norm_stats::call(input, eps); |
3171 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3172 | copy_arg(out0, std::get<0>(tmp_output)); |
3173 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3174 | copy_arg(out1, std::get<1>(tmp_output)); |
3175 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3176 | } |
3177 | |
3178 | ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) { |
3179 | auto tmp_output = at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count); |
3180 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3181 | copy_arg(out0, std::get<0>(tmp_output)); |
3182 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3183 | copy_arg(out1, std::get<1>(tmp_output)); |
3184 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3185 | } |
3186 | |
3187 | ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) { |
3188 | auto tmp_output = at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts); |
3189 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3190 | copy_arg(out0, std::get<0>(tmp_output)); |
3191 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3192 | copy_arg(out1, std::get<1>(tmp_output)); |
3193 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3194 | } |
3195 | |
3196 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3197 | auto tmp_output = at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); |
3198 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3199 | copy_arg(out0, std::get<0>(tmp_output)); |
3200 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3201 | copy_arg(out1, std::get<1>(tmp_output)); |
3202 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3203 | copy_arg(out2, std::get<2>(tmp_output)); |
3204 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3205 | } |
3206 | |
3207 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
3208 | auto tmp_output = at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); |
3209 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3210 | copy_arg(out0, std::get<0>(tmp_output)); |
3211 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3212 | copy_arg(out1, std::get<1>(tmp_output)); |
3213 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3214 | copy_arg(out2, std::get<2>(tmp_output)); |
3215 | resize_out_helper(out3, std::get<3>(tmp_output)); |
3216 | copy_arg(out3, std::get<3>(tmp_output)); |
3217 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
3218 | } |
3219 | |
3220 | at::Tensor & batch_norm_backward_elemt_out(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count, at::Tensor & out) { |
3221 | auto tmp_output = at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count); |
3222 | resize_out_helper(out, tmp_output); |
3223 | copy_arg(out, tmp_output); |
3224 | return out; |
3225 | } |
3226 | |
3227 | ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) { |
3228 | auto tmp_output = at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum); |
3229 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3230 | copy_arg(out0, std::get<0>(tmp_output)); |
3231 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3232 | copy_arg(out1, std::get<1>(tmp_output)); |
3233 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3234 | } |
3235 | |
3236 | at::Tensor & _nnpack_spatial_convolution_out_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { |
3237 | auto tmp_output = at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride); |
3238 | resize_out_helper(out, tmp_output); |
3239 | copy_arg(out, tmp_output); |
3240 | return out; |
3241 | } |
3242 | |
3243 | at::Tensor & ones_names_out(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
3244 | auto tmp_output = at::_ops::ones_names::call(size, names, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3245 | resize_out_helper(out, tmp_output); |
3246 | copy_arg(out, tmp_output); |
3247 | return out; |
3248 | } |
3249 | |
3250 | at::Tensor & ones_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
3251 | auto tmp_output = at::_ops::ones_like::call(self, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
3252 | resize_out_helper(out, tmp_output); |
3253 | copy_arg(out, tmp_output); |
3254 | return out; |
3255 | } |
3256 | |
3257 | at::Tensor & _euclidean_dist_out(const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) { |
3258 | auto tmp_output = at::_ops::_euclidean_dist::call(x1, x2); |
3259 | resize_out_helper(out, tmp_output); |
3260 | copy_arg(out, tmp_output); |
3261 | return out; |
3262 | } |
3263 | |
3264 | at::Tensor & _cdist_forward_out(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out) { |
3265 | auto tmp_output = at::_ops::_cdist_forward::call(x1, x2, p, compute_mode); |
3266 | resize_out_helper(out, tmp_output); |
3267 | copy_arg(out, tmp_output); |
3268 | return out; |
3269 | } |
3270 | |
3271 | at::Tensor & _cdist_backward_out(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) { |
3272 | auto tmp_output = at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist); |
3273 | resize_out_helper(out, tmp_output); |
3274 | copy_arg(out, tmp_output); |
3275 | return out; |
3276 | } |
3277 | |
3278 | at::Tensor & _pdist_forward_out(const at::Tensor & self, double p, at::Tensor & out) { |
3279 | auto tmp_output = at::_ops::_pdist_forward::call(self, p); |
3280 | resize_out_helper(out, tmp_output); |
3281 | copy_arg(out, tmp_output); |
3282 | return out; |
3283 | } |
3284 | |
3285 | at::Tensor & _pdist_backward_out(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) { |
3286 | auto tmp_output = at::_ops::_pdist_backward::call(grad, self, p, pdist); |
3287 | resize_out_helper(out, tmp_output); |
3288 | copy_arg(out, tmp_output); |
3289 | return out; |
3290 | } |
3291 | |
3292 | at::Tensor & pixel_shuffle_out(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) { |
3293 | auto tmp_output = at::_ops::pixel_shuffle::call(self, upscale_factor); |
3294 | resize_out_helper(out, tmp_output); |
3295 | copy_arg(out, tmp_output); |
3296 | return out; |
3297 | } |
3298 | |
3299 | at::Tensor & pixel_unshuffle_out(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) { |
3300 | auto tmp_output = at::_ops::pixel_unshuffle::call(self, downscale_factor); |
3301 | resize_out_helper(out, tmp_output); |
3302 | copy_arg(out, tmp_output); |
3303 | return out; |
3304 | } |
3305 | |
3306 | at::Tensor & channel_shuffle_out(const at::Tensor & self, int64_t groups, at::Tensor & out) { |
3307 | auto tmp_output = at::_ops::channel_shuffle::call(self, groups); |
3308 | resize_out_helper(out, tmp_output); |
3309 | copy_arg(out, tmp_output); |
3310 | return out; |
3311 | } |
3312 | |
3313 | at::Tensor & _pin_memory_out(const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out) { |
3314 | auto tmp_output = at::_ops::_pin_memory::call(self, device); |
3315 | resize_out_helper(out, tmp_output); |
3316 | copy_arg(out, tmp_output); |
3317 | return out; |
3318 | } |
3319 | |
3320 | at::Tensor & scalar_tensor_out(const at::Scalar & s, at::Tensor & out) { |
3321 | auto tmp_output = at::_ops::scalar_tensor::call(s, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3322 | resize_out_helper(out, tmp_output); |
3323 | copy_arg(out, tmp_output); |
3324 | return out; |
3325 | } |
3326 | |
3327 | at::Tensor & rand_names_out_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
3328 | auto tmp_output = at::_ops::rand_names::call(size, names, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3329 | resize_out_helper(out, tmp_output); |
3330 | copy_arg(out, tmp_output); |
3331 | return out; |
3332 | } |
3333 | |
3334 | at::Tensor & rand_generator_with_names_out_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) { |
3335 | auto tmp_output = at::_ops::rand_generator_with_names::call(size, generator, names, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3336 | resize_out_helper(out, tmp_output); |
3337 | copy_arg(out, tmp_output); |
3338 | return out; |
3339 | } |
3340 | |
3341 | at::Tensor & rand_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
3342 | auto tmp_output = at::_ops::rand_like::call(self, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
3343 | resize_out_helper(out, tmp_output); |
3344 | copy_arg(out, tmp_output); |
3345 | return out; |
3346 | } |
3347 | |
3348 | at::Tensor & randint_like_out(const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
3349 | auto tmp_output = at::_ops::randint_like::call(self, high, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
3350 | resize_out_helper(out, tmp_output); |
3351 | copy_arg(out, tmp_output); |
3352 | return out; |
3353 | } |
3354 | |
3355 | at::Tensor & randint_like_low_dtype_out(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
3356 | auto tmp_output = at::_ops::randint_like_low_dtype::call(self, low, high, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
3357 | resize_out_helper(out, tmp_output); |
3358 | copy_arg(out, tmp_output); |
3359 | return out; |
3360 | } |
3361 | |
3362 | at::Tensor & randn_names_out_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
3363 | auto tmp_output = at::_ops::randn_names::call(size, names, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3364 | resize_out_helper(out, tmp_output); |
3365 | copy_arg(out, tmp_output); |
3366 | return out; |
3367 | } |
3368 | |
3369 | at::Tensor & randn_generator_with_names_out_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) { |
3370 | auto tmp_output = at::_ops::randn_generator_with_names::call(size, generator, names, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3371 | resize_out_helper(out, tmp_output); |
3372 | copy_arg(out, tmp_output); |
3373 | return out; |
3374 | } |
3375 | |
3376 | at::Tensor & randn_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
3377 | auto tmp_output = at::_ops::randn_like::call(self, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
3378 | resize_out_helper(out, tmp_output); |
3379 | copy_arg(out, tmp_output); |
3380 | return out; |
3381 | } |
3382 | |
3383 | at::Tensor & repeat_out_symint(const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) { |
3384 | auto tmp_output = at::_ops::repeat::call(self, repeats); |
3385 | resize_out_helper(out, tmp_output); |
3386 | copy_arg(out, tmp_output); |
3387 | return out; |
3388 | } |
3389 | |
3390 | at::Tensor & repeat_interleave_Tensor_out(const at::Tensor & repeats, c10::optional<int64_t> output_size, at::Tensor & out) { |
3391 | auto tmp_output = at::_ops::repeat_interleave_Tensor::call(repeats, output_size); |
3392 | resize_out_helper(out, tmp_output); |
3393 | copy_arg(out, tmp_output); |
3394 | return out; |
3395 | } |
3396 | |
3397 | at::Tensor & _mkldnn_reshape_out(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) { |
3398 | auto tmp_output = at::_ops::_mkldnn_reshape::call(self, shape); |
3399 | resize_out_helper(out, tmp_output); |
3400 | copy_arg(out, tmp_output); |
3401 | return out; |
3402 | } |
3403 | |
3404 | at::Tensor & relu_out(const at::Tensor & self, at::Tensor & out) { |
3405 | auto tmp_output = at::_ops::relu::call(self); |
3406 | resize_out_helper(out, tmp_output); |
3407 | copy_arg(out, tmp_output); |
3408 | return out; |
3409 | } |
3410 | |
3411 | at::Tensor & select_backward_out_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) { |
3412 | auto tmp_output = at::_ops::select_backward::call(grad_output, input_sizes, dim, index); |
3413 | resize_out_helper(out, tmp_output); |
3414 | copy_arg(out, tmp_output); |
3415 | return out; |
3416 | } |
3417 | |
3418 | at::Tensor & celu_out(const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) { |
3419 | auto tmp_output = at::_ops::celu::call(self, alpha); |
3420 | resize_out_helper(out, tmp_output); |
3421 | copy_arg(out, tmp_output); |
3422 | return out; |
3423 | } |
3424 | |
3425 | at::Tensor & slice_backward_out_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) { |
3426 | auto tmp_output = at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step); |
3427 | resize_out_helper(out, tmp_output); |
3428 | copy_arg(out, tmp_output); |
3429 | return out; |
3430 | } |
3431 | |
3432 | at::Tensor & slice_scatter_out_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) { |
3433 | auto tmp_output = at::_ops::slice_scatter::call(self, src, dim, start, end, step); |
3434 | resize_out_helper(out, tmp_output); |
3435 | copy_arg(out, tmp_output); |
3436 | return out; |
3437 | } |
3438 | |
3439 | at::Tensor & select_scatter_out_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) { |
3440 | auto tmp_output = at::_ops::select_scatter::call(self, src, dim, index); |
3441 | resize_out_helper(out, tmp_output); |
3442 | copy_arg(out, tmp_output); |
3443 | return out; |
3444 | } |
3445 | |
3446 | at::Tensor & diagonal_scatter_out(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { |
3447 | auto tmp_output = at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2); |
3448 | resize_out_helper(out, tmp_output); |
3449 | copy_arg(out, tmp_output); |
3450 | return out; |
3451 | } |
3452 | |
3453 | at::Tensor & as_strided_scatter_out_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) { |
3454 | auto tmp_output = at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset); |
3455 | resize_out_helper(out, tmp_output); |
3456 | copy_arg(out, tmp_output); |
3457 | return out; |
3458 | } |
3459 | |
3460 | void unsafe_split_Tensor_out_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { |
3461 | auto tmp_output = at::_ops::unsafe_split_Tensor::call(self, split_size, dim); |
3462 | resize_out_helper(out, tmp_output); |
3463 | copy_arg(out, tmp_output); |
3464 | |
3465 | } |
3466 | |
3467 | void unsafe_split_with_sizes_out_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { |
3468 | auto tmp_output = at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim); |
3469 | resize_out_helper(out, tmp_output); |
3470 | copy_arg(out, tmp_output); |
3471 | |
3472 | } |
3473 | |
3474 | at::Tensor & sum_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
3475 | auto tmp_output = at::_ops::sum::call(self, dtype); |
3476 | resize_out_helper(out, tmp_output); |
3477 | copy_arg(out, tmp_output); |
3478 | return out; |
3479 | } |
3480 | |
3481 | ::std::tuple<at::Tensor &,at::Tensor &> std_mean_correction_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) { |
3482 | auto tmp_output = at::_ops::std_mean_correction::call(self, dim, correction, keepdim); |
3483 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3484 | copy_arg(out0, std::get<0>(tmp_output)); |
3485 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3486 | copy_arg(out1, std::get<1>(tmp_output)); |
3487 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3488 | } |
3489 | |
3490 | at::Tensor & prod_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
3491 | auto tmp_output = at::_ops::prod::call(self, dtype); |
3492 | resize_out_helper(out, tmp_output); |
3493 | copy_arg(out, tmp_output); |
3494 | return out; |
3495 | } |
3496 | |
3497 | at::Tensor & _mkldnn_transpose_out(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) { |
3498 | auto tmp_output = at::_ops::_mkldnn_transpose::call(self, dim0, dim1); |
3499 | resize_out_helper(out, tmp_output); |
3500 | copy_arg(out, tmp_output); |
3501 | return out; |
3502 | } |
3503 | |
3504 | at::Tensor & flip_out(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) { |
3505 | auto tmp_output = at::_ops::flip::call(self, dims); |
3506 | resize_out_helper(out, tmp_output); |
3507 | copy_arg(out, tmp_output); |
3508 | return out; |
3509 | } |
3510 | |
3511 | at::Tensor & roll_out(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) { |
3512 | auto tmp_output = at::_ops::roll::call(self, shifts, dims); |
3513 | resize_out_helper(out, tmp_output); |
3514 | copy_arg(out, tmp_output); |
3515 | return out; |
3516 | } |
3517 | |
3518 | at::Tensor & rot90_out(const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) { |
3519 | auto tmp_output = at::_ops::rot90::call(self, k, dims); |
3520 | resize_out_helper(out, tmp_output); |
3521 | copy_arg(out, tmp_output); |
3522 | return out; |
3523 | } |
3524 | |
3525 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3526 | auto tmp_output = at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads); |
3527 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3528 | copy_arg(out0, std::get<0>(tmp_output)); |
3529 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3530 | copy_arg(out1, std::get<1>(tmp_output)); |
3531 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3532 | copy_arg(out2, std::get<2>(tmp_output)); |
3533 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3534 | } |
3535 | |
3536 | at::Tensor & _nested_tensor_from_mask_out(const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) { |
3537 | auto tmp_output = at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check); |
3538 | resize_out_helper(out, tmp_output); |
3539 | copy_arg(out, tmp_output); |
3540 | return out; |
3541 | } |
3542 | |
3543 | at::Tensor & _nested_from_padded_out(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) { |
3544 | auto tmp_output = at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213); |
3545 | resize_out_helper(out, tmp_output); |
3546 | copy_arg(out, tmp_output); |
3547 | return out; |
3548 | } |
3549 | |
3550 | at::Tensor & _nested_tensor_size_out(const at::Tensor & self, at::Tensor & out) { |
3551 | auto tmp_output = at::_ops::_nested_tensor_size::call(self); |
3552 | resize_out_helper(out, tmp_output); |
3553 | copy_arg(out, tmp_output); |
3554 | return out; |
3555 | } |
3556 | |
3557 | at::Tensor & _nested_tensor_strides_out(const at::Tensor & self, at::Tensor & out) { |
3558 | auto tmp_output = at::_ops::_nested_tensor_strides::call(self); |
3559 | resize_out_helper(out, tmp_output); |
3560 | copy_arg(out, tmp_output); |
3561 | return out; |
3562 | } |
3563 | |
3564 | at::Tensor & _nested_from_padded_and_nested_example_out(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) { |
3565 | auto tmp_output = at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example); |
3566 | resize_out_helper(out, tmp_output); |
3567 | copy_arg(out, tmp_output); |
3568 | return out; |
3569 | } |
3570 | |
3571 | at::Tensor & _nested_view_from_buffer_copy_out(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets, at::Tensor & out) { |
3572 | auto tmp_output = at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets); |
3573 | resize_out_helper(out, tmp_output); |
3574 | copy_arg(out, tmp_output); |
3575 | return out; |
3576 | } |
3577 | |
3578 | at::Tensor & _trilinear_out(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) { |
3579 | auto tmp_output = at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); |
3580 | resize_out_helper(out, tmp_output); |
3581 | copy_arg(out, tmp_output); |
3582 | return out; |
3583 | } |
3584 | |
3585 | ::std::tuple<at::Tensor &,at::Tensor &> _unique_out(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) { |
3586 | auto tmp_output = at::_ops::_unique::call(self, sorted, return_inverse); |
3587 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3588 | copy_arg(out0, std::get<0>(tmp_output)); |
3589 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3590 | copy_arg(out1, std::get<1>(tmp_output)); |
3591 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3592 | } |
3593 | |
3594 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3595 | auto tmp_output = at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts); |
3596 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3597 | copy_arg(out0, std::get<0>(tmp_output)); |
3598 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3599 | copy_arg(out1, std::get<1>(tmp_output)); |
3600 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3601 | copy_arg(out2, std::get<2>(tmp_output)); |
3602 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3603 | } |
3604 | |
3605 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3606 | auto tmp_output = at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim); |
3607 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3608 | copy_arg(out0, std::get<0>(tmp_output)); |
3609 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3610 | copy_arg(out1, std::get<1>(tmp_output)); |
3611 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3612 | copy_arg(out2, std::get<2>(tmp_output)); |
3613 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3614 | } |
3615 | |
3616 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3617 | auto tmp_output = at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts); |
3618 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3619 | copy_arg(out0, std::get<0>(tmp_output)); |
3620 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3621 | copy_arg(out1, std::get<1>(tmp_output)); |
3622 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3623 | copy_arg(out2, std::get<2>(tmp_output)); |
3624 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3625 | } |
3626 | |
3627 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3628 | auto tmp_output = at::_ops::_unique2::call(self, sorted, return_inverse, return_counts); |
3629 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3630 | copy_arg(out0, std::get<0>(tmp_output)); |
3631 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3632 | copy_arg(out1, std::get<1>(tmp_output)); |
3633 | resize_out_helper(out2, std::get<2>(tmp_output)); |
3634 | copy_arg(out2, std::get<2>(tmp_output)); |
3635 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3636 | } |
3637 | |
3638 | at::Tensor & _unsafe_view_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
3639 | auto tmp_output = at::_ops::_unsafe_view::call(self, size); |
3640 | resize_out_helper(out, tmp_output); |
3641 | copy_arg(out, tmp_output); |
3642 | return out; |
3643 | } |
3644 | |
3645 | ::std::tuple<at::Tensor &,at::Tensor &> var_mean_correction_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) { |
3646 | auto tmp_output = at::_ops::var_mean_correction::call(self, dim, correction, keepdim); |
3647 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3648 | copy_arg(out0, std::get<0>(tmp_output)); |
3649 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3650 | copy_arg(out1, std::get<1>(tmp_output)); |
3651 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3652 | } |
3653 | |
3654 | ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) { |
3655 | auto tmp_output = at::_ops::_weight_norm_interface::call(v, g, dim); |
3656 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3657 | copy_arg(out0, std::get<0>(tmp_output)); |
3658 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3659 | copy_arg(out1, std::get<1>(tmp_output)); |
3660 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3661 | } |
3662 | |
3663 | ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) { |
3664 | auto tmp_output = at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim); |
3665 | resize_out_helper(out0, std::get<0>(tmp_output)); |
3666 | copy_arg(out0, std::get<0>(tmp_output)); |
3667 | resize_out_helper(out1, std::get<1>(tmp_output)); |
3668 | copy_arg(out1, std::get<1>(tmp_output)); |
3669 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3670 | } |
3671 | |
3672 | at::Tensor & zeros_names_out(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
3673 | auto tmp_output = at::_ops::zeros_names::call(size, names, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3674 | resize_out_helper(out, tmp_output); |
3675 | copy_arg(out, tmp_output); |
3676 | return out; |
3677 | } |
3678 | |
3679 | at::Tensor & _efficientzerotensor_out(at::IntArrayRef size, at::Tensor & out) { |
3680 | auto tmp_output = at::_ops::_efficientzerotensor::call(size, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3681 | resize_out_helper(out, tmp_output); |
3682 | copy_arg(out, tmp_output); |
3683 | return out; |
3684 | } |
3685 | |
3686 | at::Tensor & zeros_like_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
3687 | auto tmp_output = at::_ops::zeros_like::call(self, out.scalar_type(), out.layout(), out.device(), c10::nullopt, memory_format); |
3688 | resize_out_helper(out, tmp_output); |
3689 | copy_arg(out, tmp_output); |
3690 | return out; |
3691 | } |
3692 | |
3693 | at::Tensor & _standard_gamma_grad_out(const at::Tensor & self, const at::Tensor & output, at::Tensor & out) { |
3694 | auto tmp_output = at::_ops::_standard_gamma_grad::call(self, output); |
3695 | resize_out_helper(out, tmp_output); |
3696 | copy_arg(out, tmp_output); |
3697 | return out; |
3698 | } |
3699 | |
3700 | at::Tensor & _standard_gamma_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { |
3701 | auto tmp_output = at::_ops::_standard_gamma::call(self, generator); |
3702 | resize_out_helper(out, tmp_output); |
3703 | copy_arg(out, tmp_output); |
3704 | return out; |
3705 | } |
3706 | |
3707 | at::Tensor & _dirichlet_grad_out(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) { |
3708 | auto tmp_output = at::_ops::_dirichlet_grad::call(x, alpha, total); |
3709 | resize_out_helper(out, tmp_output); |
3710 | copy_arg(out, tmp_output); |
3711 | return out; |
3712 | } |
3713 | |
3714 | at::Tensor & _sample_dirichlet_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { |
3715 | auto tmp_output = at::_ops::_sample_dirichlet::call(self, generator); |
3716 | resize_out_helper(out, tmp_output); |
3717 | copy_arg(out, tmp_output); |
3718 | return out; |
3719 | } |
3720 | |
3721 | at::Tensor & poisson_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { |
3722 | auto tmp_output = at::_ops::poisson::call(self, generator); |
3723 | resize_out_helper(out, tmp_output); |
3724 | copy_arg(out, tmp_output); |
3725 | return out; |
3726 | } |
3727 | |
3728 | at::Tensor & binomial_out(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) { |
3729 | auto tmp_output = at::_ops::binomial::call(count, prob, generator); |
3730 | resize_out_helper(out, tmp_output); |
3731 | copy_arg(out, tmp_output); |
3732 | return out; |
3733 | } |
3734 | |
3735 | at::Tensor & native_norm_out(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) { |
3736 | auto tmp_output = at::_ops::native_norm::call(self, p); |
3737 | resize_out_helper(out, tmp_output); |
3738 | copy_arg(out, tmp_output); |
3739 | return out; |
3740 | } |
3741 | |
3742 | at::Tensor & native_norm_ScalarOpt_dim_dtype_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
3743 | auto tmp_output = at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype); |
3744 | resize_out_helper(out, tmp_output); |
3745 | copy_arg(out, tmp_output); |
3746 | return out; |
3747 | } |
3748 | |
3749 | at::Tensor & _sparse_sum_dim_out(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { |
3750 | auto tmp_output = at::_ops::_sparse_sum_dim::call(self, dim); |
3751 | resize_out_helper(out, tmp_output); |
3752 | copy_arg(out, tmp_output); |
3753 | return out; |
3754 | } |
3755 | |
3756 | at::Tensor & _sparse_sum_backward_out(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { |
3757 | auto tmp_output = at::_ops::_sparse_sum_backward::call(grad, self, dim); |
3758 | resize_out_helper(out, tmp_output); |
3759 | copy_arg(out, tmp_output); |
3760 | return out; |
3761 | } |
3762 | |
3763 | at::Tensor & _sparse_csr_sum_dim_dtype_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
3764 | auto tmp_output = at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype); |
3765 | resize_out_helper(out, tmp_output); |
3766 | copy_arg(out, tmp_output); |
3767 | return out; |
3768 | } |
3769 | |
3770 | at::Tensor & _sparse_csr_prod_dim_dtype_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
3771 | auto tmp_output = at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype); |
3772 | resize_out_helper(out, tmp_output); |
3773 | copy_arg(out, tmp_output); |
3774 | return out; |
3775 | } |
3776 | |
3777 | at::Tensor & _sparse_softmax_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { |
3778 | auto tmp_output = at::_ops::_sparse_softmax::call(self, dim, half_to_float); |
3779 | resize_out_helper(out, tmp_output); |
3780 | copy_arg(out, tmp_output); |
3781 | return out; |
3782 | } |
3783 | |
3784 | at::Tensor & _sparse_softmax_backward_data_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) { |
3785 | auto tmp_output = at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self); |
3786 | resize_out_helper(out, tmp_output); |
3787 | copy_arg(out, tmp_output); |
3788 | return out; |
3789 | } |
3790 | |
3791 | at::Tensor & _sparse_log_softmax_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { |
3792 | auto tmp_output = at::_ops::_sparse_log_softmax::call(self, dim, half_to_float); |
3793 | resize_out_helper(out, tmp_output); |
3794 | copy_arg(out, tmp_output); |
3795 | return out; |
3796 | } |
3797 | |
3798 | at::Tensor & _sparse_log_softmax_backward_data_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) { |
3799 | auto tmp_output = at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self); |
3800 | resize_out_helper(out, tmp_output); |
3801 | copy_arg(out, tmp_output); |
3802 | return out; |
3803 | } |
3804 | |
3805 | at::Tensor & _spdiags_out(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) { |
3806 | auto tmp_output = at::_ops::_spdiags::call(diagonals, offsets, shape, layout); |
3807 | resize_out_helper(out, tmp_output); |
3808 | copy_arg(out, tmp_output); |
3809 | return out; |
3810 | } |
3811 | |
3812 | at::Tensor & norm_ScalarOpt_dtype_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) { |
3813 | auto tmp_output = at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype); |
3814 | resize_out_helper(out, tmp_output); |
3815 | copy_arg(out, tmp_output); |
3816 | return out; |
3817 | } |
3818 | |
3819 | at::Tensor & norm_Scalar_out(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) { |
3820 | auto tmp_output = at::_ops::norm_Scalar::call(self, p); |
3821 | resize_out_helper(out, tmp_output); |
3822 | copy_arg(out, tmp_output); |
3823 | return out; |
3824 | } |
3825 | |
3826 | at::Tensor & clone_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
3827 | auto tmp_output = at::_ops::clone::call(self, memory_format); |
3828 | resize_out_helper(out, tmp_output); |
3829 | copy_arg(out, tmp_output); |
3830 | return out; |
3831 | } |
3832 | |
3833 | const at::Tensor & resize_as_out(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) { |
3834 | auto tmp_output = at::_ops::resize_as::call(self, the_template, memory_format); |
3835 | resize_out_helper(out, tmp_output); |
3836 | copy_arg(out, tmp_output); |
3837 | return out; |
3838 | } |
3839 | |
3840 | const at::Tensor & resize_as_sparse_out(const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) { |
3841 | auto tmp_output = at::_ops::resize_as_sparse::call(self, the_template); |
3842 | resize_out_helper(out, tmp_output); |
3843 | copy_arg(out, tmp_output); |
3844 | return out; |
3845 | } |
3846 | |
3847 | at::Tensor & zero_out(const at::Tensor & self, at::Tensor & out) { |
3848 | auto tmp_output = at::_ops::zero::call(self); |
3849 | resize_out_helper(out, tmp_output); |
3850 | copy_arg(out, tmp_output); |
3851 | return out; |
3852 | } |
3853 | |
3854 | at::Tensor & sub_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
3855 | auto tmp_output = at::_ops::sub_Scalar::call(self, other, alpha); |
3856 | resize_out_helper(out, tmp_output); |
3857 | copy_arg(out, tmp_output); |
3858 | return out; |
3859 | } |
3860 | |
3861 | at::Tensor & rsub_Tensor_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
3862 | auto tmp_output = at::_ops::rsub_Tensor::call(self, other, alpha); |
3863 | resize_out_helper(out, tmp_output); |
3864 | copy_arg(out, tmp_output); |
3865 | return out; |
3866 | } |
3867 | |
3868 | at::Tensor & rsub_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
3869 | auto tmp_output = at::_ops::rsub_Scalar::call(self, other, alpha); |
3870 | resize_out_helper(out, tmp_output); |
3871 | copy_arg(out, tmp_output); |
3872 | return out; |
3873 | } |
3874 | |
3875 | at::Tensor & _sparse_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
3876 | auto tmp_output = at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha); |
3877 | resize_out_helper(out, tmp_output); |
3878 | copy_arg(out, tmp_output); |
3879 | return out; |
3880 | } |
3881 | |
3882 | at::Tensor & sparse_coo_tensor_size_out(at::IntArrayRef size, at::Tensor & out) { |
3883 | auto tmp_output = at::_ops::sparse_coo_tensor_size::call(size, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3884 | resize_out_helper(out, tmp_output); |
3885 | copy_arg(out, tmp_output); |
3886 | return out; |
3887 | } |
3888 | |
3889 | at::Tensor & _sparse_coo_tensor_with_dims_out(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) { |
3890 | auto tmp_output = at::_ops::_sparse_coo_tensor_with_dims::call(sparse_dim, dense_dim, size, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3891 | resize_out_helper(out, tmp_output); |
3892 | copy_arg(out, tmp_output); |
3893 | return out; |
3894 | } |
3895 | |
3896 | at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) { |
3897 | auto tmp_output = at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
3898 | resize_out_helper(out, tmp_output); |
3899 | copy_arg(out, tmp_output); |
3900 | return out; |
3901 | } |
3902 | |
3903 | const at::Tensor & sparse_resize_out(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) { |
3904 | auto tmp_output = at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim); |
3905 | resize_out_helper(out, tmp_output); |
3906 | copy_arg(out, tmp_output); |
3907 | return out; |
3908 | } |
3909 | |
3910 | const at::Tensor & sparse_resize_and_clear_out(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) { |
3911 | auto tmp_output = at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim); |
3912 | resize_out_helper(out, tmp_output); |
3913 | copy_arg(out, tmp_output); |
3914 | return out; |
3915 | } |
3916 | |
3917 | at::Tensor & sparse_mask_out(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { |
3918 | auto tmp_output = at::_ops::sparse_mask::call(self, mask); |
3919 | resize_out_helper(out, tmp_output); |
3920 | copy_arg(out, tmp_output); |
3921 | return out; |
3922 | } |
3923 | |
3924 | at::Tensor & _to_dense_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
3925 | auto tmp_output = at::_ops::_to_dense::call(self, dtype); |
3926 | resize_out_helper(out, tmp_output); |
3927 | copy_arg(out, tmp_output); |
3928 | return out; |
3929 | } |
3930 | |
3931 | at::Tensor & _coalesce_out(const at::Tensor & self, at::Tensor & out) { |
3932 | auto tmp_output = at::_ops::_coalesce::call(self); |
3933 | resize_out_helper(out, tmp_output); |
3934 | copy_arg(out, tmp_output); |
3935 | return out; |
3936 | } |
3937 | |
3938 | at::Tensor & _coalesced_out(const at::Tensor & self, bool coalesced, at::Tensor & out) { |
3939 | auto tmp_output = at::_ops::_coalesced::call(self, coalesced); |
3940 | resize_out_helper(out, tmp_output); |
3941 | copy_arg(out, tmp_output); |
3942 | return out; |
3943 | } |
3944 | |
3945 | at::Tensor & copy_sparse_to_sparse_out(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) { |
3946 | auto tmp_output = at::_ops::copy_sparse_to_sparse::call(self, src, non_blocking); |
3947 | resize_out_helper(out, tmp_output); |
3948 | copy_arg(out, tmp_output); |
3949 | return out; |
3950 | } |
3951 | |
3952 | at::Tensor & to_sparse_sparse_dim_out(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) { |
3953 | auto tmp_output = at::_ops::to_sparse_sparse_dim::call(self, sparse_dim); |
3954 | resize_out_helper(out, tmp_output); |
3955 | copy_arg(out, tmp_output); |
3956 | return out; |
3957 | } |
3958 | |
3959 | at::Tensor & to_sparse_out(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) { |
3960 | auto tmp_output = at::_ops::to_sparse::call(self, layout, blocksize, dense_dim); |
3961 | resize_out_helper(out, tmp_output); |
3962 | copy_arg(out, tmp_output); |
3963 | return out; |
3964 | } |
3965 | |
3966 | at::Tensor & to_sparse_csr_out(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) { |
3967 | auto tmp_output = at::_ops::to_sparse_csr::call(self, dense_dim); |
3968 | resize_out_helper(out, tmp_output); |
3969 | copy_arg(out, tmp_output); |
3970 | return out; |
3971 | } |
3972 | |
3973 | at::Tensor & to_sparse_csc_out(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) { |
3974 | auto tmp_output = at::_ops::to_sparse_csc::call(self, dense_dim); |
3975 | resize_out_helper(out, tmp_output); |
3976 | copy_arg(out, tmp_output); |
3977 | return out; |
3978 | } |
3979 | |
3980 | at::Tensor & to_sparse_bsr_out(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) { |
3981 | auto tmp_output = at::_ops::to_sparse_bsr::call(self, blocksize, dense_dim); |
3982 | resize_out_helper(out, tmp_output); |
3983 | copy_arg(out, tmp_output); |
3984 | return out; |
3985 | } |
3986 | |
3987 | at::Tensor & to_sparse_bsc_out(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) { |
3988 | auto tmp_output = at::_ops::to_sparse_bsc::call(self, blocksize, dense_dim); |
3989 | resize_out_helper(out, tmp_output); |
3990 | copy_arg(out, tmp_output); |
3991 | return out; |
3992 | } |
3993 | |
3994 | at::Tensor & to_mkldnn_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
3995 | auto tmp_output = at::_ops::to_mkldnn::call(self, dtype); |
3996 | resize_out_helper(out, tmp_output); |
3997 | copy_arg(out, tmp_output); |
3998 | return out; |
3999 | } |
4000 | |
4001 | at::Tensor & mkldnn_reorder_conv2d_weight_out(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) { |
4002 | auto tmp_output = at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size); |
4003 | resize_out_helper(out, tmp_output); |
4004 | copy_arg(out, tmp_output); |
4005 | return out; |
4006 | } |
4007 | |
4008 | at::Tensor & mkldnn_reorder_conv3d_weight_out(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
4009 | auto tmp_output = at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups); |
4010 | resize_out_helper(out, tmp_output); |
4011 | copy_arg(out, tmp_output); |
4012 | return out; |
4013 | } |
4014 | |
4015 | at::Tensor & quantize_per_tensor_dynamic_out(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) { |
4016 | auto tmp_output = at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range); |
4017 | resize_out_helper(out, tmp_output); |
4018 | copy_arg(out, tmp_output); |
4019 | return out; |
4020 | } |
4021 | |
4022 | at::Tensor & quantize_per_tensor_out(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) { |
4023 | auto tmp_output = at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype); |
4024 | resize_out_helper(out, tmp_output); |
4025 | copy_arg(out, tmp_output); |
4026 | return out; |
4027 | } |
4028 | |
4029 | at::Tensor & quantize_per_tensor_tensor_qparams_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) { |
4030 | auto tmp_output = at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype); |
4031 | resize_out_helper(out, tmp_output); |
4032 | copy_arg(out, tmp_output); |
4033 | return out; |
4034 | } |
4035 | |
4036 | void quantize_per_tensor_tensors_out(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { |
4037 | auto tmp_output = at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype); |
4038 | resize_out_helper(out, tmp_output); |
4039 | copy_arg(out, tmp_output); |
4040 | |
4041 | } |
4042 | |
4043 | at::Tensor & quantize_per_channel_out(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) { |
4044 | auto tmp_output = at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype); |
4045 | resize_out_helper(out, tmp_output); |
4046 | copy_arg(out, tmp_output); |
4047 | return out; |
4048 | } |
4049 | |
4050 | at::Tensor & dequantize_self_out(const at::Tensor & self, at::Tensor & out) { |
4051 | auto tmp_output = at::_ops::dequantize_self::call(self); |
4052 | resize_out_helper(out, tmp_output); |
4053 | copy_arg(out, tmp_output); |
4054 | return out; |
4055 | } |
4056 | |
4057 | void dequantize_tensors_out(at::TensorList tensors, at::TensorList out) { |
4058 | auto tmp_output = at::_ops::dequantize_tensors::call(tensors); |
4059 | resize_out_helper(out, tmp_output); |
4060 | copy_arg(out, tmp_output); |
4061 | |
4062 | } |
4063 | |
4064 | at::Tensor & q_per_channel_scales_out(const at::Tensor & self, at::Tensor & out) { |
4065 | auto tmp_output = at::_ops::q_per_channel_scales::call(self); |
4066 | resize_out_helper(out, tmp_output); |
4067 | copy_arg(out, tmp_output); |
4068 | return out; |
4069 | } |
4070 | |
4071 | at::Tensor & q_per_channel_zero_points_out(const at::Tensor & self, at::Tensor & out) { |
4072 | auto tmp_output = at::_ops::q_per_channel_zero_points::call(self); |
4073 | resize_out_helper(out, tmp_output); |
4074 | copy_arg(out, tmp_output); |
4075 | return out; |
4076 | } |
4077 | |
4078 | at::Tensor & int_repr_out(const at::Tensor & self, at::Tensor & out) { |
4079 | auto tmp_output = at::_ops::int_repr::call(self); |
4080 | resize_out_helper(out, tmp_output); |
4081 | copy_arg(out, tmp_output); |
4082 | return out; |
4083 | } |
4084 | |
4085 | at::Tensor & _make_per_tensor_quantized_tensor_out(const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) { |
4086 | auto tmp_output = at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point); |
4087 | resize_out_helper(out, tmp_output); |
4088 | copy_arg(out, tmp_output); |
4089 | return out; |
4090 | } |
4091 | |
4092 | at::Tensor & _make_per_channel_quantized_tensor_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) { |
4093 | auto tmp_output = at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis); |
4094 | resize_out_helper(out, tmp_output); |
4095 | copy_arg(out, tmp_output); |
4096 | return out; |
4097 | } |
4098 | |
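// Multi-output wrappers unpack the functional result with std::get<i>() and
// return a tuple of references to the caller's out tensors.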
4099 | ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { |
4100 | auto tmp_output = at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max); |
4101 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4102 | copy_arg(out0, std::get<0>(tmp_output)); |
4103 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4104 | copy_arg(out1, std::get<1>(tmp_output)); |
4105 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4106 | } |
4107 | |
4108 | ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { |
4109 | auto tmp_output = at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max); |
4110 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4111 | copy_arg(out0, std::get<0>(tmp_output)); |
4112 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4113 | copy_arg(out1, std::get<1>(tmp_output)); |
4114 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4115 | } |
4116 | |
4117 | at::Tensor & _fake_quantize_learnable_per_tensor_affine_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) { |
4118 | auto tmp_output = at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor); |
4119 | resize_out_helper(out, tmp_output); |
4120 | copy_arg(out, tmp_output); |
4121 | return out; |
4122 | } |
4123 | |
4124 | ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { |
4125 | auto tmp_output = at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max); |
4126 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4127 | copy_arg(out0, std::get<0>(tmp_output)); |
4128 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4129 | copy_arg(out1, std::get<1>(tmp_output)); |
4130 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4131 | } |
4132 | |
4133 | at::Tensor & _fake_quantize_learnable_per_channel_affine_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) { |
4134 | auto tmp_output = at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor); |
4135 | resize_out_helper(out, tmp_output); |
4136 | copy_arg(out, tmp_output); |
4137 | return out; |
4138 | } |
4139 | |
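// This wrapper dispatches to the _functional variant of the op, which returns
// fresh tensors instead of mutating running_min/running_max/scale/zero_point
// in place; of its results, only out0 and out1 are written back here.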
4140 | ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) { |
4141 | auto tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); |
4142 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4143 | copy_arg(out0, std::get<0>(tmp_output)); |
4144 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4145 | copy_arg(out1, std::get<1>(tmp_output)); |
4146 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4147 | } |
4148 | |
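// _to_copy_out reads the target dtype/layout/device off `out` itself and
// leaves pin_memory unset (c10::nullopt), so the properties of the supplied
// out tensor determine the conversion.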
4149 | at::Tensor & _to_copy_out(const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
4150 | auto tmp_output = at::_ops::_to_copy::call(self, out.scalar_type(), out.layout(), out.device(), c10::nullopt, non_blocking, memory_format); |
4151 | resize_out_helper(out, tmp_output); |
4152 | copy_arg(out, tmp_output); |
4153 | return out; |
4154 | } |
4155 | |
4156 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { |
4157 | auto tmp_output = at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); |
4158 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4159 | copy_arg(out0, std::get<0>(tmp_output)); |
4160 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4161 | copy_arg(out1, std::get<1>(tmp_output)); |
4162 | resize_out_helper(out2, std::get<2>(tmp_output)); |
4163 | copy_arg(out2, std::get<2>(tmp_output)); |
4164 | resize_out_helper(out3, std::get<3>(tmp_output)); |
4165 | copy_arg(out3, std::get<3>(tmp_output)); |
4166 | resize_out_helper(out4, std::get<4>(tmp_output)); |
4167 | copy_arg(out4, std::get<4>(tmp_output)); |
4168 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4); |
4169 | } |
4170 | |
4171 | void lstm_mps_backward_out(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) { |
4172 | auto tmp_output = at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); |
4173 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4174 | copy_arg(out0, std::get<0>(tmp_output)); |
4175 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4176 | copy_arg(out1, std::get<1>(tmp_output)); |
4177 | resize_out_helper(out2, std::get<2>(tmp_output)); |
4178 | copy_arg(out2, std::get<2>(tmp_output)); |
4179 | |
4180 | } |
4181 | |
4182 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
4183 | auto tmp_output = at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias); |
4184 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4185 | copy_arg(out0, std::get<0>(tmp_output)); |
4186 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4187 | copy_arg(out1, std::get<1>(tmp_output)); |
4188 | resize_out_helper(out2, std::get<2>(tmp_output)); |
4189 | copy_arg(out2, std::get<2>(tmp_output)); |
4190 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
4191 | } |
4192 | |
4193 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
4194 | auto tmp_output = at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias); |
4195 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4196 | copy_arg(out0, std::get<0>(tmp_output)); |
4197 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4198 | copy_arg(out1, std::get<1>(tmp_output)); |
4199 | resize_out_helper(out2, std::get<2>(tmp_output)); |
4200 | copy_arg(out2, std::get<2>(tmp_output)); |
4201 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
4202 | } |
4203 | |
4204 | ::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) { |
4205 | auto tmp_output = at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias); |
4206 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4207 | copy_arg(out0, std::get<0>(tmp_output)); |
4208 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4209 | copy_arg(out1, std::get<1>(tmp_output)); |
4210 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4211 | } |
4212 | |
4213 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { |
4214 | auto tmp_output = at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias); |
4215 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4216 | copy_arg(out0, std::get<0>(tmp_output)); |
4217 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4218 | copy_arg(out1, std::get<1>(tmp_output)); |
4219 | resize_out_helper(out2, std::get<2>(tmp_output)); |
4220 | copy_arg(out2, std::get<2>(tmp_output)); |
4221 | resize_out_helper(out3, std::get<3>(tmp_output)); |
4222 | copy_arg(out3, std::get<3>(tmp_output)); |
4223 | resize_out_helper(out4, std::get<4>(tmp_output)); |
4224 | copy_arg(out4, std::get<4>(tmp_output)); |
4225 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4); |
4226 | } |
4227 | |
4228 | ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) { |
4229 | auto tmp_output = at::_ops::_pack_padded_sequence::call(input, lengths, batch_first); |
4230 | resize_out_helper(out0, std::get<0>(tmp_output)); |
4231 | copy_arg(out0, std::get<0>(tmp_output)); |
4232 | resize_out_helper(out1, std::get<1>(tmp_output)); |
4233 | copy_arg(out1, std::get<1>(tmp_output)); |
4234 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4235 | } |
4236 | |
4237 | at::Tensor & set_source_Storage_out(const at::Tensor & self, at::Storage source, at::Tensor & out) { |
4238 | auto tmp_output = at::_ops::set_source_Storage::call(self, source); |
4239 | resize_out_helper(out, tmp_output); |
4240 | copy_arg(out, tmp_output); |
4241 | return out; |
4242 | } |
4243 | |
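// _symint wrappers take c10::SymInt / c10::SymIntArrayRef arguments so that
// sizes, strides, and offsets can remain symbolic (e.g. under tracing) rather
// than being forced to concrete int64_t values.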
4244 | at::Tensor & set_source_Storage_storage_offset_out_symint(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { |
4245 | auto tmp_output = at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride); |
4246 | resize_out_helper(out, tmp_output); |
4247 | copy_arg(out, tmp_output); |
4248 | return out; |
4249 | } |
4250 | |
4251 | at::Tensor & set_source_Tensor_out(const at::Tensor & self, const at::Tensor & source, at::Tensor & out) { |
4252 | auto tmp_output = at::_ops::set_source_Tensor::call(self, source); |
4253 | resize_out_helper(out, tmp_output); |
4254 | copy_arg(out, tmp_output); |
4255 | return out; |
4256 | } |
4257 | |
4258 | at::Tensor & set_out(const at::Tensor & self, at::Tensor & out) { |
4259 | auto tmp_output = at::_ops::set::call(self); |
4260 | resize_out_helper(out, tmp_output); |
4261 | copy_arg(out, tmp_output); |
4262 | return out; |
4263 | } |
4264 | |
4265 | at::Tensor & lift_out(const at::Tensor & self, at::Tensor & out) { |
4266 | auto tmp_output = at::_ops::lift::call(self); |
4267 | resize_out_helper(out, tmp_output); |
4268 | copy_arg(out, tmp_output); |
4269 | return out; |
4270 | } |
4271 | |
4272 | at::Tensor & lift_fresh_copy_out(const at::Tensor & self, at::Tensor & out) { |
4273 | auto tmp_output = at::_ops::lift_fresh_copy::call(self); |
4274 | resize_out_helper(out, tmp_output); |
4275 | copy_arg(out, tmp_output); |
4276 | return out; |
4277 | } |
4278 | |
4279 | at::Tensor & masked_fill_Scalar_out(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) { |
4280 | auto tmp_output = at::_ops::masked_fill_Scalar::call(self, mask, value); |
4281 | resize_out_helper(out, tmp_output); |
4282 | copy_arg(out, tmp_output); |
4283 | return out; |
4284 | } |
4285 | |
4286 | at::Tensor & masked_fill_Tensor_out(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) { |
4287 | auto tmp_output = at::_ops::masked_fill_Tensor::call(self, mask, value); |
4288 | resize_out_helper(out, tmp_output); |
4289 | copy_arg(out, tmp_output); |
4290 | return out; |
4291 | } |
4292 | |
4293 | at::Tensor & masked_scatter_out(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) { |
4294 | auto tmp_output = at::_ops::masked_scatter::call(self, mask, source); |
4295 | resize_out_helper(out, tmp_output); |
4296 | copy_arg(out, tmp_output); |
4297 | return out; |
4298 | } |
4299 | |
4300 | at::Tensor & _masked_softmax_out(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, at::Tensor & out) { |
4301 | auto tmp_output = at::_ops::_masked_softmax::call(self, mask, dim, mask_type); |
4302 | resize_out_helper(out, tmp_output); |
4303 | copy_arg(out, tmp_output); |
4304 | return out; |
4305 | } |
4306 | |
4307 | at::Tensor & _masked_softmax_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) { |
4308 | auto tmp_output = at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim); |
4309 | resize_out_helper(out, tmp_output); |
4310 | copy_arg(out, tmp_output); |
4311 | return out; |
4312 | } |
4313 | |
4314 | at::Tensor & put_out(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) { |
4315 | auto tmp_output = at::_ops::put::call(self, index, source, accumulate); |
4316 | resize_out_helper(out, tmp_output); |
4317 | copy_arg(out, tmp_output); |
4318 | return out; |
4319 | } |
4320 | |
4321 | at::Tensor & index_fill_int_Scalar_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { |
4322 | auto tmp_output = at::_ops::index_fill_int_Scalar::call(self, dim, index, value); |
4323 | resize_out_helper(out, tmp_output); |
4324 | copy_arg(out, tmp_output); |
4325 | return out; |
4326 | } |
4327 | |
4328 | at::Tensor & index_fill_int_Tensor_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) { |
4329 | auto tmp_output = at::_ops::index_fill_int_Tensor::call(self, dim, index, value); |
4330 | resize_out_helper(out, tmp_output); |
4331 | copy_arg(out, tmp_output); |
4332 | return out; |
4333 | } |
4334 | |
4335 | at::Tensor & bitwise_and_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
4336 | auto tmp_output = at::_ops::bitwise_and_Scalar_Tensor::call(self, other); |
4337 | resize_out_helper(out, tmp_output); |
4338 | copy_arg(out, tmp_output); |
4339 | return out; |
4340 | } |
4341 | |
4342 | at::Tensor & bitwise_or_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
4343 | auto tmp_output = at::_ops::bitwise_or_Scalar_Tensor::call(self, other); |
4344 | resize_out_helper(out, tmp_output); |
4345 | copy_arg(out, tmp_output); |
4346 | return out; |
4347 | } |
4348 | |
4349 | at::Tensor & bitwise_xor_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
4350 | auto tmp_output = at::_ops::bitwise_xor_Scalar_Tensor::call(self, other); |
4351 | resize_out_helper(out, tmp_output); |
4352 | copy_arg(out, tmp_output); |
4353 | return out; |
4354 | } |
4355 | |
4356 | at::Tensor & __lshift___Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
4357 | auto tmp_output = at::_ops::__lshift___Scalar::call(self, other); |
4358 | resize_out_helper(out, tmp_output); |
4359 | copy_arg(out, tmp_output); |
4360 | return out; |
4361 | } |
4362 | |
4363 | at::Tensor & __lshift___Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
4364 | auto tmp_output = at::_ops::__lshift___Tensor::call(self, other); |
4365 | resize_out_helper(out, tmp_output); |
4366 | copy_arg(out, tmp_output); |
4367 | return out; |
4368 | } |
4369 | |
4370 | at::Tensor & bitwise_left_shift_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
4371 | auto tmp_output = at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other); |
4372 | resize_out_helper(out, tmp_output); |
4373 | copy_arg(out, tmp_output); |
4374 | return out; |
4375 | } |
4376 | |
4377 | at::Tensor & __rshift___Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
4378 | auto tmp_output = at::_ops::__rshift___Scalar::call(self, other); |
4379 | resize_out_helper(out, tmp_output); |
4380 | copy_arg(out, tmp_output); |
4381 | return out; |
4382 | } |
4383 | |
4384 | at::Tensor & __rshift___Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
4385 | auto tmp_output = at::_ops::__rshift___Tensor::call(self, other); |
4386 | resize_out_helper(out, tmp_output); |
4387 | copy_arg(out, tmp_output); |
4388 | return out; |
4389 | } |
4390 | |
4391 | at::Tensor & bitwise_right_shift_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
4392 | auto tmp_output = at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other); |
4393 | resize_out_helper(out, tmp_output); |
4394 | copy_arg(out, tmp_output); |
4395 | return out; |
4396 | } |
4397 | |
4398 | at::Tensor & random_from_out(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator, at::Tensor & out) { |
4399 | auto tmp_output = at::_ops::random_from::call(self, from, to, generator); |
4400 | resize_out_helper(out, tmp_output); |
4401 | copy_arg(out, tmp_output); |
4402 | return out; |
4403 | } |
4404 | |
4405 | at::Tensor & random_to_out(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator, at::Tensor & out) { |
4406 | auto tmp_output = at::_ops::random_to::call(self, to, generator); |
4407 | resize_out_helper(out, tmp_output); |
4408 | copy_arg(out, tmp_output); |
4409 | return out; |
4410 | } |
4411 | |
4412 | at::Tensor & random_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { |
4413 | auto tmp_output = at::_ops::random::call(self, generator); |
4414 | resize_out_helper(out, tmp_output); |
4415 | copy_arg(out, tmp_output); |
4416 | return out; |
4417 | } |
4418 | |
4419 | at::Tensor & uniform_out(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out) { |
4420 | auto tmp_output = at::_ops::uniform::call(self, from, to, generator); |
4421 | resize_out_helper(out, tmp_output); |
4422 | copy_arg(out, tmp_output); |
4423 | return out; |
4424 | } |
4425 | |
4426 | at::Tensor & cauchy_out(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator, at::Tensor & out) { |
4427 | auto tmp_output = at::_ops::cauchy::call(self, median, sigma, generator); |
4428 | resize_out_helper(out, tmp_output); |
4429 | copy_arg(out, tmp_output); |
4430 | return out; |
4431 | } |
4432 | |
4433 | at::Tensor & log_normal_out(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) { |
4434 | auto tmp_output = at::_ops::log_normal::call(self, mean, std, generator); |
4435 | resize_out_helper(out, tmp_output); |
4436 | copy_arg(out, tmp_output); |
4437 | return out; |
4438 | } |
4439 | |
4440 | at::Tensor & exponential_out(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out) { |
4441 | auto tmp_output = at::_ops::exponential::call(self, lambd, generator); |
4442 | resize_out_helper(out, tmp_output); |
4443 | copy_arg(out, tmp_output); |
4444 | return out; |
4445 | } |
4446 | |
4447 | at::Tensor & geometric_out(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) { |
4448 | auto tmp_output = at::_ops::geometric::call(self, p, generator); |
4449 | resize_out_helper(out, tmp_output); |
4450 | copy_arg(out, tmp_output); |
4451 | return out; |
4452 | } |
4453 | |
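// Factory-style wrappers such as tril_indices_out / triu_indices_out have no
// input tensor, so dtype/layout/device are again read off `out`. A minimal
// usage sketch (hypothetical caller code, not part of this generated file):
//
//   at::Tensor idx = at::empty({2, 0}, at::kLong);
//   tril_indices_out(/*row=*/4, /*col=*/4, /*offset=*/0, idx);
//   // idx is resized to [2, 10]: the ten lower-triangular positions of a 4x4.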
4454 | at::Tensor & tril_indices_out(int64_t row, int64_t col, int64_t offset, at::Tensor & out) { |
4455 | auto tmp_output = at::_ops::tril_indices::call(row, col, offset, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
4456 | resize_out_helper(out, tmp_output); |
4457 | copy_arg(out, tmp_output); |
4458 | return out; |
4459 | } |
4460 | |
4461 | at::Tensor & triu_indices_out(int64_t row, int64_t col, int64_t offset, at::Tensor & out) { |
4462 | auto tmp_output = at::_ops::triu_indices::call(row, col, offset, out.scalar_type(), out.layout(), out.device(), c10::nullopt); |
4463 | resize_out_helper(out, tmp_output); |
4464 | copy_arg(out, tmp_output); |
4465 | return out; |
4466 | } |
4467 | |
4468 | at::Tensor & trace_out(const at::Tensor & self, at::Tensor & out) { |
4469 | auto tmp_output = at::_ops::trace::call(self); |
4470 | resize_out_helper(out, tmp_output); |
4471 | copy_arg(out, tmp_output); |
4472 | return out; |
4473 | } |
4474 | |
4475 | at::Tensor & _cholesky_solve_helper_out(const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) { |
4476 | auto tmp_output = at::_ops::_cholesky_solve_helper::call(self, A, upper); |
4477 | resize_out_helper(out, tmp_output); |
4478 | copy_arg(out, tmp_output); |
4479 | return out; |
4480 | } |
4481 | |
4482 | at::Tensor & dist_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) { |
4483 | auto tmp_output = at::_ops::dist::call(self, other, p); |
4484 | resize_out_helper(out, tmp_output); |
4485 | copy_arg(out, tmp_output); |
4486 | return out; |
4487 | } |
4488 | |
4489 | void _histogramdd_bin_edges_out(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) { |
4490 | auto tmp_output = at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density); |
4491 | resize_out_helper(out, tmp_output); |
4492 | copy_arg(out, tmp_output); |
4493 | |
4494 | } |
4495 | |
4496 | at::Tensor & _histogramdd_from_bin_cts_out(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) { |
4497 | auto tmp_output = at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density); |
4498 | resize_out_helper(out, tmp_output); |
4499 | copy_arg(out, tmp_output); |
4500 | return out; |
4501 | } |
4502 | |
4503 | at::Tensor & _histogramdd_from_bin_tensors_out(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) { |
4504 | auto tmp_output = at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density); |
4505 | resize_out_helper(out, tmp_output); |
4506 | copy_arg(out, tmp_output); |
4507 | return out; |
4508 | } |
4509 | |
4510 | at::Tensor & remainder_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
4511 | auto tmp_output = at::_ops::remainder_Scalar_Tensor::call(self, other); |
4512 | resize_out_helper(out, tmp_output); |
4513 | copy_arg(out, tmp_output); |
4514 | return out; |
4515 | } |
4516 | |
4517 | at::Tensor & argsort_stable_out(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) { |
4518 | auto tmp_output = at::_ops::argsort_stable::call(self, stable, dim, descending); |
4519 | resize_out_helper(out, tmp_output); |
4520 | copy_arg(out, tmp_output); |
4521 | return out; |
4522 | } |
4523 | |
4524 | at::Tensor & unfold_backward_out_symint(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) { |
4525 | auto tmp_output = at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step); |
4526 | resize_out_helper(out, tmp_output); |
4527 | copy_arg(out, tmp_output); |
4528 | return out; |
4529 | } |
4530 | |
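// `normal` is exposed as an in-place op; this wrapper therefore routes through
// normal_functional, its functionalized counterpart that returns a fresh
// tensor.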
4531 | at::Tensor & normal_out(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) { |
4532 | auto tmp_output = at::_ops::normal_functional::call(self, mean, std, generator); |
4533 | resize_out_helper(out, tmp_output); |
4534 | copy_arg(out, tmp_output); |
4535 | return out; |
4536 | } |
4537 | |
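// The AMP helpers below likewise dispatch to functional counterparts that
// return a tuple: std::get<0>() selects the result destined for `out`, and the
// remaining outputs (the updated found_inf / growth_tracker state) are not
// copied back by these wrappers.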
4538 | void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) { |
4539 | auto tmp_output = at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale); |
4540 | resize_out_helper(out, std::get<0>(tmp_output)); |
4541 | copy_arg(out, std::get<0>(tmp_output)); |
4542 | |
4543 | } |
4544 | |
4545 | at::Tensor & _amp_update_scale_out(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) { |
4546 | auto tmp_output = at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); |
4547 | resize_out_helper(out, std::get<0>(tmp_output)); |
4548 | copy_arg(out, std::get<0>(tmp_output)); |
4549 | return out; |
4550 | } |
4551 | |
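// The _foreach_* wrappers all follow the TensorList pattern: run the
// functional foreach op over the input list(s) and copy the results into
// `out`, relying on the TensorList overloads of the helpers to resize and
// copy elementwise.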
4552 | void _foreach_add_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
4553 | auto tmp_output = at::_ops::_foreach_add_Scalar::call(self, scalar); |
4554 | resize_out_helper(out, tmp_output); |
4555 | copy_arg(out, tmp_output); |
4556 | |
4557 | } |
4558 | |
4559 | void _foreach_sub_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
4560 | auto tmp_output = at::_ops::_foreach_sub_Scalar::call(self, scalar); |
4561 | resize_out_helper(out, tmp_output); |
4562 | copy_arg(out, tmp_output); |
4563 | |
4564 | } |
4565 | |
4566 | void _foreach_mul_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
4567 | auto tmp_output = at::_ops::_foreach_mul_Scalar::call(self, scalar); |
4568 | resize_out_helper(out, tmp_output); |
4569 | copy_arg(out, tmp_output); |
4570 | |
4571 | } |
4572 | |
4573 | void _foreach_div_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
4574 | auto tmp_output = at::_ops::_foreach_div_Scalar::call(self, scalar); |
4575 | resize_out_helper(out, tmp_output); |
4576 | copy_arg(out, tmp_output); |
4577 | |
4578 | } |
4579 | |
4580 | void _foreach_clamp_min_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
4581 | auto tmp_output = at::_ops::_foreach_clamp_min_Scalar::call(self, scalar); |
4582 | resize_out_helper(out, tmp_output); |
4583 | copy_arg(out, tmp_output); |
4584 | |
4585 | } |
4586 | |
4587 | void _foreach_clamp_max_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
4588 | auto tmp_output = at::_ops::_foreach_clamp_max_Scalar::call(self, scalar); |
4589 | resize_out_helper(out, tmp_output); |
4590 | copy_arg(out, tmp_output); |
4591 | |
4592 | } |
4593 | |
4594 | void _foreach_maximum_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
4595 | auto tmp_output = at::_ops::_foreach_maximum_Scalar::call(self, scalar); |
4596 | resize_out_helper(out, tmp_output); |
4597 | copy_arg(out, tmp_output); |
4598 | |
4599 | } |
4600 | |
4601 | void _foreach_minimum_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
4602 | auto tmp_output = at::_ops::_foreach_minimum_Scalar::call(self, scalar); |
4603 | resize_out_helper(out, tmp_output); |
4604 | copy_arg(out, tmp_output); |
4605 | |
4606 | } |
4607 | |
4608 | void _foreach_add_List_out(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { |
4609 | auto tmp_output = at::_ops::_foreach_add_List::call(self, other, alpha); |
4610 | resize_out_helper(out, tmp_output); |
4611 | copy_arg(out, tmp_output); |
4612 | |
4613 | } |
4614 | |
4615 | void _foreach_sub_List_out(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { |
4616 | auto tmp_output = at::_ops::_foreach_sub_List::call(self, other, alpha); |
4617 | resize_out_helper(out, tmp_output); |
4618 | copy_arg(out, tmp_output); |
4619 | |
4620 | } |
4621 | |
4622 | void _foreach_mul_List_out(at::TensorList self, at::TensorList other, at::TensorList out) { |
4623 | auto tmp_output = at::_ops::_foreach_mul_List::call(self, other); |
4624 | resize_out_helper(out, tmp_output); |
4625 | copy_arg(out, tmp_output); |
4626 | |
4627 | } |
4628 | |
4629 | void _foreach_div_List_out(at::TensorList self, at::TensorList other, at::TensorList out) { |
4630 | auto tmp_output = at::_ops::_foreach_div_List::call(self, other); |
4631 | resize_out_helper(out, tmp_output); |
4632 | copy_arg(out, tmp_output); |
4633 | |
4634 | } |
4635 | |
4636 | void _foreach_clamp_min_List_out(at::TensorList self, at::TensorList other, at::TensorList out) { |
4637 | auto tmp_output = at::_ops::_foreach_clamp_min_List::call(self, other); |
4638 | resize_out_helper(out, tmp_output); |
4639 | copy_arg(out, tmp_output); |
4640 | |
4641 | } |
4642 | |
4643 | void _foreach_clamp_max_List_out(at::TensorList self, at::TensorList other, at::TensorList out) { |
4644 | auto tmp_output = at::_ops::_foreach_clamp_max_List::call(self, other); |
4645 | resize_out_helper(out, tmp_output); |
4646 | copy_arg(out, tmp_output); |
4647 | |
4648 | } |
4649 | |
4650 | void _foreach_maximum_List_out(at::TensorList self, at::TensorList other, at::TensorList out) { |
4651 | auto tmp_output = at::_ops::_foreach_maximum_List::call(self, other); |
4652 | resize_out_helper(out, tmp_output); |
4653 | copy_arg(out, tmp_output); |
4654 | |
4655 | } |
4656 | |
4657 | void _foreach_minimum_List_out(at::TensorList self, at::TensorList other, at::TensorList out) { |
4658 | auto tmp_output = at::_ops::_foreach_minimum_List::call(self, other); |
4659 | resize_out_helper(out, tmp_output); |
4660 | copy_arg(out, tmp_output); |
4661 | |
4662 | } |
4663 | |
4664 | void _foreach_add_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4665 | auto tmp_output = at::_ops::_foreach_add_ScalarList::call(self, scalars); |
4666 | resize_out_helper(out, tmp_output); |
4667 | copy_arg(out, tmp_output); |
4668 | |
4669 | } |
4670 | |
4671 | void _foreach_sub_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4672 | auto tmp_output = at::_ops::_foreach_sub_ScalarList::call(self, scalars); |
4673 | resize_out_helper(out, tmp_output); |
4674 | copy_arg(out, tmp_output); |
4675 | |
4676 | } |
4677 | |
4678 | void _foreach_div_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4679 | auto tmp_output = at::_ops::_foreach_div_ScalarList::call(self, scalars); |
4680 | resize_out_helper(out, tmp_output); |
4681 | copy_arg(out, tmp_output); |
4682 | |
4683 | } |
4684 | |
4685 | void _foreach_mul_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4686 | auto tmp_output = at::_ops::_foreach_mul_ScalarList::call(self, scalars); |
4687 | resize_out_helper(out, tmp_output); |
4688 | copy_arg(out, tmp_output); |
4689 | |
4690 | } |
4691 | |
4692 | void _foreach_clamp_min_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4693 | auto tmp_output = at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars); |
4694 | resize_out_helper(out, tmp_output); |
4695 | copy_arg(out, tmp_output); |
4696 | |
4697 | } |
4698 | |
4699 | void _foreach_clamp_max_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4700 | auto tmp_output = at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars); |
4701 | resize_out_helper(out, tmp_output); |
4702 | copy_arg(out, tmp_output); |
4703 | |
4704 | } |
4705 | |
4706 | void _foreach_maximum_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4707 | auto tmp_output = at::_ops::_foreach_maximum_ScalarList::call(self, scalars); |
4708 | resize_out_helper(out, tmp_output); |
4709 | copy_arg(out, tmp_output); |
4710 | |
4711 | } |
4712 | |
4713 | void _foreach_minimum_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4714 | auto tmp_output = at::_ops::_foreach_minimum_ScalarList::call(self, scalars); |
4715 | resize_out_helper(out, tmp_output); |
4716 | copy_arg(out, tmp_output); |
4717 | |
4718 | } |
4719 | |
4720 | void _foreach_exp_out(at::TensorList self, at::TensorList out) { |
4721 | auto tmp_output = at::_ops::_foreach_exp::call(self); |
4722 | resize_out_helper(out, tmp_output); |
4723 | copy_arg(out, tmp_output); |
4724 | |
4725 | } |
4726 | |
4727 | void _foreach_zero_out(at::TensorList self, at::TensorList out) { |
4728 | auto tmp_output = at::_ops::_foreach_zero::call(self); |
4729 | resize_out_helper(out, tmp_output); |
4730 | copy_arg(out, tmp_output); |
4731 | |
4732 | } |
4733 | |
4734 | void _foreach_sqrt_out(at::TensorList self, at::TensorList out) { |
4735 | auto tmp_output = at::_ops::_foreach_sqrt::call(self); |
4736 | resize_out_helper(out, tmp_output); |
4737 | copy_arg(out, tmp_output); |
4738 | |
4739 | } |
4740 | |
4741 | void _foreach_abs_out(at::TensorList self, at::TensorList out) { |
4742 | auto tmp_output = at::_ops::_foreach_abs::call(self); |
4743 | resize_out_helper(out, tmp_output); |
4744 | copy_arg(out, tmp_output); |
4745 | |
4746 | } |
4747 | |
4748 | void _foreach_acos_out(at::TensorList self, at::TensorList out) { |
4749 | auto tmp_output = at::_ops::_foreach_acos::call(self); |
4750 | resize_out_helper(out, tmp_output); |
4751 | copy_arg(out, tmp_output); |
4752 | |
4753 | } |
4754 | |
4755 | void _foreach_asin_out(at::TensorList self, at::TensorList out) { |
4756 | auto tmp_output = at::_ops::_foreach_asin::call(self); |
4757 | resize_out_helper(out, tmp_output); |
4758 | copy_arg(out, tmp_output); |
4759 | |
4760 | } |
4761 | |
4762 | void _foreach_atan_out(at::TensorList self, at::TensorList out) { |
4763 | auto tmp_output = at::_ops::_foreach_atan::call(self); |
4764 | resize_out_helper(out, tmp_output); |
4765 | copy_arg(out, tmp_output); |
4766 | |
4767 | } |
4768 | |
4769 | void _foreach_ceil_out(at::TensorList self, at::TensorList out) { |
4770 | auto tmp_output = at::_ops::_foreach_ceil::call(self); |
4771 | resize_out_helper(out, tmp_output); |
4772 | copy_arg(out, tmp_output); |
4773 | |
4774 | } |
4775 | |
4776 | void _foreach_cos_out(at::TensorList self, at::TensorList out) { |
4777 | auto tmp_output = at::_ops::_foreach_cos::call(self); |
4778 | resize_out_helper(out, tmp_output); |
4779 | copy_arg(out, tmp_output); |
4780 | |
4781 | } |
4782 | |
4783 | void _foreach_cosh_out(at::TensorList self, at::TensorList out) { |
4784 | auto tmp_output = at::_ops::_foreach_cosh::call(self); |
4785 | resize_out_helper(out, tmp_output); |
4786 | copy_arg(out, tmp_output); |
4787 | |
4788 | } |
4789 | |
4790 | void _foreach_erf_out(at::TensorList self, at::TensorList out) { |
4791 | auto tmp_output = at::_ops::_foreach_erf::call(self); |
4792 | resize_out_helper(out, tmp_output); |
4793 | copy_arg(out, tmp_output); |
4794 | |
4795 | } |
4796 | |
4797 | void _foreach_erfc_out(at::TensorList self, at::TensorList out) { |
4798 | auto tmp_output = at::_ops::_foreach_erfc::call(self); |
4799 | resize_out_helper(out, tmp_output); |
4800 | copy_arg(out, tmp_output); |
4801 | |
4802 | } |
4803 | |
4804 | void _foreach_expm1_out(at::TensorList self, at::TensorList out) { |
4805 | auto tmp_output = at::_ops::_foreach_expm1::call(self); |
4806 | resize_out_helper(out, tmp_output); |
4807 | copy_arg(out, tmp_output); |
4808 | |
4809 | } |
4810 | |
4811 | void _foreach_floor_out(at::TensorList self, at::TensorList out) { |
4812 | auto tmp_output = at::_ops::_foreach_floor::call(self); |
4813 | resize_out_helper(out, tmp_output); |
4814 | copy_arg(out, tmp_output); |
4815 | |
4816 | } |
4817 | |
4818 | void _foreach_log_out(at::TensorList self, at::TensorList out) { |
4819 | auto tmp_output = at::_ops::_foreach_log::call(self); |
4820 | resize_out_helper(out, tmp_output); |
4821 | copy_arg(out, tmp_output); |
4822 | |
4823 | } |
4824 | |
4825 | void _foreach_log10_out(at::TensorList self, at::TensorList out) { |
4826 | auto tmp_output = at::_ops::_foreach_log10::call(self); |
4827 | resize_out_helper(out, tmp_output); |
4828 | copy_arg(out, tmp_output); |
4829 | |
4830 | } |
4831 | |
4832 | void _foreach_log1p_out(at::TensorList self, at::TensorList out) { |
4833 | auto tmp_output = at::_ops::_foreach_log1p::call(self); |
4834 | resize_out_helper(out, tmp_output); |
4835 | copy_arg(out, tmp_output); |
4836 | |
4837 | } |
4838 | |
4839 | void _foreach_log2_out(at::TensorList self, at::TensorList out) { |
4840 | auto tmp_output = at::_ops::_foreach_log2::call(self); |
4841 | resize_out_helper(out, tmp_output); |
4842 | copy_arg(out, tmp_output); |
4843 | |
4844 | } |
4845 | |
4846 | void _foreach_neg_out(at::TensorList self, at::TensorList out) { |
4847 | auto tmp_output = at::_ops::_foreach_neg::call(self); |
4848 | resize_out_helper(out, tmp_output); |
4849 | copy_arg(out, tmp_output); |
4850 | |
4851 | } |
4852 | |
4853 | void _foreach_tan_out(at::TensorList self, at::TensorList out) { |
4854 | auto tmp_output = at::_ops::_foreach_tan::call(self); |
4855 | resize_out_helper(out, tmp_output); |
4856 | copy_arg(out, tmp_output); |
4857 | |
4858 | } |
4859 | |
4860 | void _foreach_tanh_out(at::TensorList self, at::TensorList out) { |
4861 | auto tmp_output = at::_ops::_foreach_tanh::call(self); |
4862 | resize_out_helper(out, tmp_output); |
4863 | copy_arg(out, tmp_output); |
4864 | |
4865 | } |
4866 | |
4867 | void _foreach_sin_out(at::TensorList self, at::TensorList out) { |
4868 | auto tmp_output = at::_ops::_foreach_sin::call(self); |
4869 | resize_out_helper(out, tmp_output); |
4870 | copy_arg(out, tmp_output); |
4871 | |
4872 | } |
4873 | |
4874 | void _foreach_sinh_out(at::TensorList self, at::TensorList out) { |
4875 | auto tmp_output = at::_ops::_foreach_sinh::call(self); |
4876 | resize_out_helper(out, tmp_output); |
4877 | copy_arg(out, tmp_output); |
4878 | |
4879 | } |
4880 | |
4881 | void _foreach_round_out(at::TensorList self, at::TensorList out) { |
4882 | auto tmp_output = at::_ops::_foreach_round::call(self); |
4883 | resize_out_helper(out, tmp_output); |
4884 | copy_arg(out, tmp_output); |
4885 | |
4886 | } |
4887 | |
4888 | void _foreach_lgamma_out(at::TensorList self, at::TensorList out) { |
4889 | auto tmp_output = at::_ops::_foreach_lgamma::call(self); |
4890 | resize_out_helper(out, tmp_output); |
4891 | copy_arg(out, tmp_output); |
4892 | |
4893 | } |
4894 | |
4895 | void _foreach_frac_out(at::TensorList self, at::TensorList out) { |
4896 | auto tmp_output = at::_ops::_foreach_frac::call(self); |
4897 | resize_out_helper(out, tmp_output); |
4898 | copy_arg(out, tmp_output); |
4899 | |
4900 | } |
4901 | |
4902 | void _foreach_reciprocal_out(at::TensorList self, at::TensorList out) { |
4903 | auto tmp_output = at::_ops::_foreach_reciprocal::call(self); |
4904 | resize_out_helper(out, tmp_output); |
4905 | copy_arg(out, tmp_output); |
4906 | |
4907 | } |
4908 | |
4909 | void _foreach_sigmoid_out(at::TensorList self, at::TensorList out) { |
4910 | auto tmp_output = at::_ops::_foreach_sigmoid::call(self); |
4911 | resize_out_helper(out, tmp_output); |
4912 | copy_arg(out, tmp_output); |
4913 | |
4914 | } |
4915 | |
4916 | void _foreach_trunc_out(at::TensorList self, at::TensorList out) { |
4917 | auto tmp_output = at::_ops::_foreach_trunc::call(self); |
4918 | resize_out_helper(out, tmp_output); |
4919 | copy_arg(out, tmp_output); |
4920 | |
4921 | } |
4922 | |
4923 | void _foreach_addcdiv_Scalar_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { |
4924 | auto tmp_output = at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value); |
4925 | resize_out_helper(out, tmp_output); |
4926 | copy_arg(out, tmp_output); |
4927 | |
4928 | } |
4929 | |
4930 | void _foreach_addcmul_Scalar_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { |
4931 | auto tmp_output = at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value); |
4932 | resize_out_helper(out, tmp_output); |
4933 | copy_arg(out, tmp_output); |
4934 | |
4935 | } |
4936 | |
4937 | void _foreach_addcdiv_ScalarList_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4938 | auto tmp_output = at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars); |
4939 | resize_out_helper(out, tmp_output); |
4940 | copy_arg(out, tmp_output); |
4941 | |
4942 | } |
4943 | |
4944 | void _foreach_addcdiv_Tensor_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { |
4945 | auto tmp_output = at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars); |
4946 | resize_out_helper(out, tmp_output); |
4947 | copy_arg(out, tmp_output); |
4948 | |
4949 | } |
4950 | |
4951 | void _foreach_addcmul_ScalarList_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
4952 | auto tmp_output = at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars); |
4953 | resize_out_helper(out, tmp_output); |
4954 | copy_arg(out, tmp_output); |
4955 | |
4956 | } |
4957 | |
4958 | void _foreach_addcmul_Tensor_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { |
4959 | auto tmp_output = at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars); |
4960 | resize_out_helper(out, tmp_output); |
4961 | copy_arg(out, tmp_output); |
4962 | |
4963 | } |
4964 | |
4965 | void _foreach_norm_Scalar_out(at::TensorList self, const at::Scalar & ord, at::TensorList out) { |
4966 | auto tmp_output = at::_ops::_foreach_norm_Scalar::call(self, ord); |
4967 | resize_out_helper(out, tmp_output); |
4968 | copy_arg(out, tmp_output); |
4969 | |
4970 | } |
4971 | |
4972 | void _foreach_lerp_List_out(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) { |
4973 | auto tmp_output = at::_ops::_foreach_lerp_List::call(self, tensors1, weights); |
4974 | resize_out_helper(out, tmp_output); |
4975 | copy_arg(out, tmp_output); |
4976 | |
4977 | } |
4978 | |
4979 | void _foreach_lerp_Scalar_out(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) { |
4980 | auto tmp_output = at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight); |
4981 | resize_out_helper(out, tmp_output); |
4982 | copy_arg(out, tmp_output); |
4983 | |
4984 | } |
4985 | |
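// In the Scalar-`self` variants below (bucketize, searchsorted), `self` is a
// plain at::Scalar rather than a tensor; the out-handling is unchanged.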
4986 | at::Tensor & bucketize_Scalar_out(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) { |
4987 | auto tmp_output = at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right); |
4988 | resize_out_helper(out, tmp_output); |
4989 | copy_arg(out, tmp_output); |
4990 | return out; |
4991 | } |
4992 | |
4993 | at::Tensor & searchsorted_Scalar_out(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) { |
4994 | auto tmp_output = at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter); |
4995 | resize_out_helper(out, tmp_output); |
4996 | copy_arg(out, tmp_output); |
4997 | return out; |
4998 | } |
4999 | |
5000 | at::Tensor & glu_jvp_out(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) { |
5001 | auto tmp_output = at::_ops::glu_jvp::call(glu, x, dx, dim); |
5002 | resize_out_helper(out, tmp_output); |
5003 | copy_arg(out, tmp_output); |
5004 | return out; |
5005 | } |
5006 | |
5007 | at::Tensor & glu_backward_jvp_out(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) { |
5008 | auto tmp_output = at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim); |
5009 | resize_out_helper(out, tmp_output); |
5010 | copy_arg(out, tmp_output); |
5011 | return out; |
5012 | } |
5013 | |
5014 | at::Tensor & hardswish_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { |
5015 | auto tmp_output = at::_ops::hardswish_backward::call(grad_output, self); |
5016 | resize_out_helper(out, tmp_output); |
5017 | copy_arg(out, tmp_output); |
5018 | return out; |
5019 | } |
5020 | |
5021 | at::Tensor & rrelu_with_noise_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) { |
5022 | auto tmp_output = at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result); |
5023 | resize_out_helper(out, tmp_output); |
5024 | copy_arg(out, tmp_output); |
5025 | return out; |
5026 | } |
5027 | |
5028 | at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { |
5029 | auto tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self); |
5030 | resize_out_helper(out, tmp_output); |
5031 | copy_arg(out, tmp_output); |
5032 | return out; |
5033 | } |
5034 | |
5035 | at::Tensor & _adaptive_avg_pool2d_out_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { |
5036 | auto tmp_output = at::_ops::_adaptive_avg_pool2d::call(self, output_size); |
5037 | resize_out_helper(out, tmp_output); |
5038 | copy_arg(out, tmp_output); |
5039 | return out; |
5040 | } |
5041 | |
5042 | at::Tensor & _adaptive_avg_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { |
5043 | auto tmp_output = at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self); |
5044 | resize_out_helper(out, tmp_output); |
5045 | copy_arg(out, tmp_output); |
5046 | return out; |
5047 | } |
5048 | |
5049 | at::Tensor & _adaptive_avg_pool3d_out_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { |
5050 | auto tmp_output = at::_ops::_adaptive_avg_pool3d::call(self, output_size); |
5051 | resize_out_helper(out, tmp_output); |
5052 | copy_arg(out, tmp_output); |
5053 | return out; |
5054 | } |
5055 | |
5056 | at::Tensor & _adaptive_avg_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { |
5057 | auto tmp_output = at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self); |
5058 | resize_out_helper(out, tmp_output); |
5059 | copy_arg(out, tmp_output); |
5060 | return out; |
5061 | } |
5062 | |
5063 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_output_mask_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
5064 | auto tmp_output = at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask); |
5065 | resize_out_helper(out0, std::get<0>(tmp_output)); |
5066 | copy_arg(out0, std::get<0>(tmp_output)); |
5067 | resize_out_helper(out1, std::get<1>(tmp_output)); |
5068 | copy_arg(out1, std::get<1>(tmp_output)); |
5069 | resize_out_helper(out2, std::get<2>(tmp_output)); |
5070 | copy_arg(out2, std::get<2>(tmp_output)); |
5071 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
5072 | } |
5073 | |
5074 | at::Tensor & conv_depthwise3d_out_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { |
5075 | auto tmp_output = at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation); |
5076 | resize_out_helper(out, tmp_output); |
5077 | copy_arg(out, tmp_output); |
5078 | return out; |
5079 | } |
5080 | |
5081 | at::Tensor & slow_conv_dilated2d_out_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { |
5082 | auto tmp_output = at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation); |
5083 | resize_out_helper(out, tmp_output); |
5084 | copy_arg(out, tmp_output); |
5085 | return out; |
5086 | } |
5087 | |
5088 | at::Tensor & slow_conv_dilated3d_out_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { |
5089 | auto tmp_output = at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation); |
5090 | resize_out_helper(out, tmp_output); |
5091 | copy_arg(out, tmp_output); |
5092 | return out; |
5093 | } |
5094 | |
5095 | at::Tensor & isinf_out(const at::Tensor & self, at::Tensor & out) { |
5096 | auto tmp_output = at::_ops::isinf::call(self); |
5097 | resize_out_helper(out, tmp_output); |
5098 | copy_arg(out, tmp_output); |
5099 | return out; |
5100 | } |
5101 | |
5102 | at::Tensor & linalg_matrix_exp_out(const at::Tensor & self, at::Tensor & out) { |
5103 | auto tmp_output = at::_ops::linalg_matrix_exp::call(self); |
5104 | resize_out_helper(out, tmp_output); |
5105 | copy_arg(out, tmp_output); |
5106 | return out; |
5107 | } |
5108 | |
5109 | at::Tensor & _test_optional_intlist_out(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) { |
5110 | auto tmp_output = at::_ops::_test_optional_intlist::call(values, addends); |
5111 | resize_out_helper(out, tmp_output); |
5112 | copy_arg(out, tmp_output); |
5113 | return out; |
5114 | } |
5115 | |
5116 | at::Tensor & _test_optional_filled_intlist_out(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) { |
5117 | auto tmp_output = at::_ops::_test_optional_filled_intlist::call(values, addends); |
5118 | resize_out_helper(out, tmp_output); |
5119 | copy_arg(out, tmp_output); |
5120 | return out; |
5121 | } |
5122 | |
5123 | at::Tensor & _test_optional_floatlist_out(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out) { |
5124 | auto tmp_output = at::_ops::_test_optional_floatlist::call(values, addends); |
5125 | resize_out_helper(out, tmp_output); |
5126 | copy_arg(out, tmp_output); |
5127 | return out; |
5128 | } |
5129 | |
5130 | at::Tensor & _test_warn_in_autograd_out(const at::Tensor & self, at::Tensor & out) { |
5131 | auto tmp_output = at::_ops::_test_warn_in_autograd::call(self); |
5132 | resize_out_helper(out, tmp_output); |
5133 | copy_arg(out, tmp_output); |
5134 | return out; |
5135 | } |
5136 | |
5137 | at::Tensor & _test_autograd_multiple_dispatch_fullcoverage_out(const at::Tensor & self, at::Tensor & out) { |
5138 | auto tmp_output = at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self); |
5139 | resize_out_helper(out, tmp_output); |
5140 | copy_arg(out, tmp_output); |
5141 | return out; |
5142 | } |
5143 | |
5144 | at::Tensor & _test_autograd_multiple_dispatch_view_copy_out(const at::Tensor & self, at::Tensor & out) { |
5145 | auto tmp_output = at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self); |
5146 | resize_out_helper(out, tmp_output); |
5147 | copy_arg(out, tmp_output); |
5148 | return out; |
5149 | } |
5150 | |
5151 | at::Tensor & segment_reduce_out(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out) { |
5152 | auto tmp_output = at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial); |
5153 | resize_out_helper(out, tmp_output); |
5154 | copy_arg(out, tmp_output); |
5155 | return out; |
5156 | } |
5157 | |
5158 | at::Tensor & _segment_reduce_backward_out(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial, at::Tensor & out) { |
5159 | auto tmp_output = at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial); |
5160 | resize_out_helper(out, tmp_output); |
5161 | copy_arg(out, tmp_output); |
5162 | return out; |
5163 | } |
5164 | |
5165 | at::Tensor & _nested_tensor_from_tensor_list_out(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out) { |
5166 | auto tmp_output = at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory); |
5167 | resize_out_helper(out, tmp_output); |
5168 | copy_arg(out, tmp_output); |
5169 | return out; |
5170 | } |
5171 | |
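// The remaining wrappers are the out= forms of the *_copy operators: the
// non-aliasing counterparts of view ops, which materialize into `out` what the
// corresponding view would have aliased.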
at::Tensor & _fw_primal_copy_out(const at::Tensor & self, int64_t level, at::Tensor & out) {
  auto tmp_output = at::_ops::_fw_primal_copy::call(self, level);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & _make_dual_copy_out(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
  auto tmp_output = at::_ops::_make_dual_copy::call(primal, tangent, level);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & view_as_real_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::view_as_real_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & view_as_complex_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::view_as_complex_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & _conj_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::_conj_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & _neg_view_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::_neg_view_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & as_strided_copy_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
  auto tmp_output = at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & _sparse_broadcast_to_copy_out(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
  auto tmp_output = at::_ops::_sparse_broadcast_to_copy::call(self, size);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & diagonal_copy_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
  auto tmp_output = at::_ops::diagonal_copy::call(self, offset, dim1, dim2);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & expand_copy_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
  auto tmp_output = at::_ops::expand_copy::call(self, size, implicit);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & permute_copy_out(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
  auto tmp_output = at::_ops::permute_copy::call(self, dims);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & _reshape_alias_copy_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
  auto tmp_output = at::_ops::_reshape_alias_copy::call(self, size, stride);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & select_copy_int_out_symint(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
  auto tmp_output = at::_ops::select_copy_int::call(self, dim, index);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & detach_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::detach_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & slice_copy_Tensor_out_symint(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
  auto tmp_output = at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & squeeze_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::squeeze_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & squeeze_copy_dim_out(const at::Tensor & self, int64_t dim, at::Tensor & out) {
  auto tmp_output = at::_ops::squeeze_copy_dim::call(self, dim);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & squeeze_copy_dims_out(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
  auto tmp_output = at::_ops::squeeze_copy_dims::call(self, dim);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & t_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::t_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & transpose_copy_int_out(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
  auto tmp_output = at::_ops::transpose_copy_int::call(self, dim0, dim1);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & unsqueeze_copy_out(const at::Tensor & self, int64_t dim, at::Tensor & out) {
  auto tmp_output = at::_ops::unsqueeze_copy::call(self, dim);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & _indices_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::_indices_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & _values_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::_values_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & indices_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::indices_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & values_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::values_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & crow_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::crow_indices_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & col_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::col_indices_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & ccol_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::ccol_indices_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & row_indices_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::row_indices_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & view_copy_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
  auto tmp_output = at::_ops::view_copy::call(self, size);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & view_copy_dtype_out(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
  auto tmp_output = at::_ops::view_copy_dtype::call(self, dtype);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & unfold_copy_out(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
  auto tmp_output = at::_ops::unfold_copy::call(self, dimension, size, step);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & alias_copy_out(const at::Tensor & self, at::Tensor & out) {
  auto tmp_output = at::_ops::alias_copy::call(self);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & to_padded_tensor_out_symint(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {
  auto tmp_output = at::_ops::to_padded_tensor::call(self, padding, output_size);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

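// The wrappers below service the fused transformer/attention kernels. Ops
// with several outputs take one out argument per result (out0, out1, ...):
// the functional call returns a tuple, each element is resized and copied
// into its slot, and the references are bundled back into a tuple.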
at::Tensor & _transformer_encoder_layer_fwd_out(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out) {
  auto tmp_output = at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
  auto tmp_output = at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
  resize_out_helper(out0, std::get<0>(tmp_output));
  copy_arg(out0, std::get<0>(tmp_output));
  resize_out_helper(out1, std::get<1>(tmp_output));
  copy_arg(out1, std::get<1>(tmp_output));
  return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
}

at::Tensor & _triton_scaled_dot_attention_out(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
  auto tmp_output = at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

at::Tensor & _triton_multi_head_attention_out(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) {
  auto tmp_output = at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_out(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  auto tmp_output = at::_ops::_transformer_decoder_only_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
  resize_out_helper(out0, std::get<0>(tmp_output));
  copy_arg(out0, std::get<0>(tmp_output));
  resize_out_helper(out1, std::get<1>(tmp_output));
  copy_arg(out1, std::get<1>(tmp_output));
  resize_out_helper(out2, std::get<2>(tmp_output));
  copy_arg(out2, std::get<2>(tmp_output));
  return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
}

::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _native_decoder_only_multi_head_attention_out(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
  auto tmp_output = at::_ops::_native_decoder_only_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights);
  resize_out_helper(out0, std::get<0>(tmp_output));
  copy_arg(out0, std::get<0>(tmp_output));
  resize_out_helper(out1, std::get<1>(tmp_output));
  copy_arg(out1, std::get<1>(tmp_output));
  resize_out_helper(out2, std::get<2>(tmp_output));
  copy_arg(out2, std::get<2>(tmp_output));
  resize_out_helper(out3, std::get<3>(tmp_output));
  copy_arg(out3, std::get<3>(tmp_output));
  return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
}

at::Tensor & _foobar_out(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
  auto tmp_output = at::_ops::_foobar::call(self, arg1, arg2, arg3);
  resize_out_helper(out, tmp_output);
  copy_arg(out, tmp_output);
  return out;
}

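// The two fused-optimizer wrappers return void rather than a tensor
// reference; only the first list in the functional result is resized and
// copied into the single `out` TensorList parameter.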
void _fused_adam_out(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  auto tmp_output = at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  resize_out_helper(out, std::get<0>(tmp_output));
  copy_arg(out, std::get<0>(tmp_output));
}

void _fused_adamw_out(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  auto tmp_output = at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  resize_out_helper(out, std::get<0>(tmp_output));
  copy_arg(out, std::get<0>(tmp_output));
}

} // namespace native
} // namespace at