1// required for old g++ to compile PRId64 macros, see
2// https://github.com/pytorch/pytorch/issues/3571
3// for context
4#ifndef __STDC_FORMAT_MACROS
5#define __STDC_FORMAT_MACROS
6#endif
7
8// an external backend might generate file within its code tree
9// and check all the source files within the tree with clang-format.
10// so, disable it since the backend might have a different config.
11// clang-format off
12
13// NOTE: This condition is true for all PyTorch internal libraries, it
14// just excludes external projects such as torch_xla which
15// re-use some of the PyTorch codegen machinery.
16#if defined(CAFFE2_BUILD_MAIN_LIB) || \
17 defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
18 defined(TORCH_HIP_BUILD_MAIN_LIB) || \
19 defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
20 defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
21#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
22#endif
23
24// @generated by torchgen/gen.py from RegisterDispatchKey.cpp
25
26#include <c10/core/TensorImpl.h>
27#include <c10/core/Allocator.h>
28#include <ATen/DeviceGuard.h>
29#include <ATen/NamedTensorUtils.h>
30#include <ATen/Utils.h>
31#include <ATen/WrapDimUtils.h>
32#include <ATen/Dispatch.h>
33#include <c10/util/ExclusivelyOwned.h>
34#include <c10/util/Half.h>
35#include <c10/core/UndefinedTensorImpl.h>
36#include <c10/util/Optional.h>
37#include <ATen/Tensor.h>
38#include <ATen/native/Resize.h>
39
40#include <cstddef>
41#include <functional>
42#include <memory>
43#include <utility>
44
45#include <ATen/Config.h>
46#include <ATen/core/op_registration/adaption.h>
47#include <torch/library.h>
48
49
50#include <ATen/ops/as_strided_native.h>
51#include <ATen/ops/empty.h>
52#include <ATen/ops/empty_strided.h>
53#include <ATen/ops/_copy_from_and_resize.h>
54#include <ATen/ops/_copy_from.h>
55#include <ATen/ops/_addmm_activation.h>
56#include <ATen/ops/_addmm_activation_compositeexplicitautogradnonfunctional_dispatch.h>
57#include <ATen/ops/_addmm_activation_native.h>
58#include <ATen/ops/_conj_copy.h>
59#include <ATen/ops/_conj_copy_compositeexplicitautogradnonfunctional_dispatch.h>
60#include <ATen/ops/_conj_copy_native.h>
61#include <ATen/ops/_convert_indices_from_coo_to_csr.h>
62#include <ATen/ops/_convert_indices_from_coo_to_csr_compositeexplicitautogradnonfunctional_dispatch.h>
63#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
64#include <ATen/ops/_convert_indices_from_csr_to_coo.h>
65#include <ATen/ops/_convert_indices_from_csr_to_coo_compositeexplicitautogradnonfunctional_dispatch.h>
66#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
67#include <ATen/ops/_fw_primal_copy.h>
68#include <ATen/ops/_fw_primal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
69#include <ATen/ops/_fw_primal_copy_native.h>
70#include <ATen/ops/_indices_copy.h>
71#include <ATen/ops/_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
72#include <ATen/ops/_indices_copy_native.h>
73#include <ATen/ops/_linalg_det.h>
74#include <ATen/ops/_linalg_det_compositeexplicitautogradnonfunctional_dispatch.h>
75#include <ATen/ops/_linalg_det_native.h>
76#include <ATen/ops/_linalg_eigh.h>
77#include <ATen/ops/_linalg_eigh_compositeexplicitautogradnonfunctional_dispatch.h>
78#include <ATen/ops/_linalg_eigh_native.h>
79#include <ATen/ops/_linalg_slogdet.h>
80#include <ATen/ops/_linalg_slogdet_compositeexplicitautogradnonfunctional_dispatch.h>
81#include <ATen/ops/_linalg_slogdet_native.h>
82#include <ATen/ops/_linalg_solve_ex.h>
83#include <ATen/ops/_linalg_solve_ex_compositeexplicitautogradnonfunctional_dispatch.h>
84#include <ATen/ops/_linalg_solve_ex_native.h>
85#include <ATen/ops/_linalg_svd.h>
86#include <ATen/ops/_linalg_svd_compositeexplicitautogradnonfunctional_dispatch.h>
87#include <ATen/ops/_linalg_svd_native.h>
88#include <ATen/ops/_log_softmax.h>
89#include <ATen/ops/_log_softmax_backward_data.h>
90#include <ATen/ops/_log_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
91#include <ATen/ops/_log_softmax_backward_data_native.h>
92#include <ATen/ops/_log_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
93#include <ATen/ops/_log_softmax_native.h>
94#include <ATen/ops/_make_dual_copy.h>
95#include <ATen/ops/_make_dual_copy_compositeexplicitautogradnonfunctional_dispatch.h>
96#include <ATen/ops/_make_dual_copy_native.h>
97#include <ATen/ops/_neg_view_copy.h>
98#include <ATen/ops/_neg_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
99#include <ATen/ops/_neg_view_copy_native.h>
100#include <ATen/ops/_nested_view_from_buffer_copy.h>
101#include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautogradnonfunctional_dispatch.h>
102#include <ATen/ops/_nested_view_from_buffer_copy_native.h>
103#include <ATen/ops/_reshape_alias_copy.h>
104#include <ATen/ops/_reshape_alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
105#include <ATen/ops/_reshape_alias_copy_native.h>
106#include <ATen/ops/_softmax.h>
107#include <ATen/ops/_softmax_backward_data.h>
108#include <ATen/ops/_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
109#include <ATen/ops/_softmax_backward_data_native.h>
110#include <ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
111#include <ATen/ops/_softmax_native.h>
112#include <ATen/ops/_sparse_broadcast_to_copy.h>
113#include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautogradnonfunctional_dispatch.h>
114#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
115#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy.h>
116#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
117#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h>
118#include <ATen/ops/_trilinear.h>
119#include <ATen/ops/_trilinear_compositeexplicitautogradnonfunctional_dispatch.h>
120#include <ATen/ops/_trilinear_native.h>
121#include <ATen/ops/_upsample_bicubic2d_aa.h>
122#include <ATen/ops/_upsample_bicubic2d_aa_backward.h>
123#include <ATen/ops/_upsample_bicubic2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
124#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
125#include <ATen/ops/_upsample_bicubic2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
126#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
127#include <ATen/ops/_upsample_bilinear2d_aa.h>
128#include <ATen/ops/_upsample_bilinear2d_aa_backward.h>
129#include <ATen/ops/_upsample_bilinear2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
130#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
131#include <ATen/ops/_upsample_bilinear2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
132#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
133#include <ATen/ops/_upsample_nearest_exact1d.h>
134#include <ATen/ops/_upsample_nearest_exact1d_backward.h>
135#include <ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
136#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
137#include <ATen/ops/_upsample_nearest_exact1d_compositeexplicitautogradnonfunctional_dispatch.h>
138#include <ATen/ops/_upsample_nearest_exact1d_native.h>
139#include <ATen/ops/_upsample_nearest_exact2d.h>
140#include <ATen/ops/_upsample_nearest_exact2d_backward.h>
141#include <ATen/ops/_upsample_nearest_exact2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
142#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
143#include <ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h>
144#include <ATen/ops/_upsample_nearest_exact2d_native.h>
145#include <ATen/ops/_upsample_nearest_exact3d.h>
146#include <ATen/ops/_upsample_nearest_exact3d_backward.h>
147#include <ATen/ops/_upsample_nearest_exact3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
148#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
149#include <ATen/ops/_upsample_nearest_exact3d_compositeexplicitautogradnonfunctional_dispatch.h>
150#include <ATen/ops/_upsample_nearest_exact3d_native.h>
151#include <ATen/ops/_values_copy.h>
152#include <ATen/ops/_values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
153#include <ATen/ops/_values_copy_native.h>
154#include <ATen/ops/acos.h>
155#include <ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h>
156#include <ATen/ops/acos_native.h>
157#include <ATen/ops/acosh.h>
158#include <ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h>
159#include <ATen/ops/acosh_native.h>
160#include <ATen/ops/adaptive_max_pool2d.h>
161#include <ATen/ops/adaptive_max_pool2d_backward.h>
162#include <ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
163#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
164#include <ATen/ops/adaptive_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
165#include <ATen/ops/adaptive_max_pool2d_native.h>
166#include <ATen/ops/adaptive_max_pool3d.h>
167#include <ATen/ops/adaptive_max_pool3d_backward.h>
168#include <ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
169#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
170#include <ATen/ops/adaptive_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
171#include <ATen/ops/adaptive_max_pool3d_native.h>
172#include <ATen/ops/add.h>
173#include <ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h>
174#include <ATen/ops/add_native.h>
175#include <ATen/ops/addcdiv.h>
176#include <ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h>
177#include <ATen/ops/addcdiv_native.h>
178#include <ATen/ops/addcmul.h>
179#include <ATen/ops/addcmul_compositeexplicitautogradnonfunctional_dispatch.h>
180#include <ATen/ops/addcmul_native.h>
181#include <ATen/ops/addmm.h>
182#include <ATen/ops/addmm_compositeexplicitautogradnonfunctional_dispatch.h>
183#include <ATen/ops/addmm_native.h>
184#include <ATen/ops/addmv.h>
185#include <ATen/ops/addmv_compositeexplicitautogradnonfunctional_dispatch.h>
186#include <ATen/ops/addmv_native.h>
187#include <ATen/ops/alias_copy.h>
188#include <ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
189#include <ATen/ops/alias_copy_native.h>
190#include <ATen/ops/all.h>
191#include <ATen/ops/all_compositeexplicitautogradnonfunctional_dispatch.h>
192#include <ATen/ops/all_native.h>
193#include <ATen/ops/amax.h>
194#include <ATen/ops/amax_compositeexplicitautogradnonfunctional_dispatch.h>
195#include <ATen/ops/amax_native.h>
196#include <ATen/ops/amin.h>
197#include <ATen/ops/amin_compositeexplicitautogradnonfunctional_dispatch.h>
198#include <ATen/ops/amin_native.h>
199#include <ATen/ops/aminmax.h>
200#include <ATen/ops/aminmax_compositeexplicitautogradnonfunctional_dispatch.h>
201#include <ATen/ops/aminmax_native.h>
202#include <ATen/ops/any.h>
203#include <ATen/ops/any_compositeexplicitautogradnonfunctional_dispatch.h>
204#include <ATen/ops/any_native.h>
205#include <ATen/ops/argmax.h>
206#include <ATen/ops/argmax_compositeexplicitautogradnonfunctional_dispatch.h>
207#include <ATen/ops/argmax_native.h>
208#include <ATen/ops/argmin.h>
209#include <ATen/ops/argmin_compositeexplicitautogradnonfunctional_dispatch.h>
210#include <ATen/ops/argmin_native.h>
211#include <ATen/ops/as_strided.h>
212#include <ATen/ops/as_strided_compositeexplicitautogradnonfunctional_dispatch.h>
213#include <ATen/ops/as_strided_copy.h>
214#include <ATen/ops/as_strided_copy_compositeexplicitautogradnonfunctional_dispatch.h>
215#include <ATen/ops/as_strided_copy_native.h>
216#include <ATen/ops/as_strided_native.h>
217#include <ATen/ops/asin.h>
218#include <ATen/ops/asin_compositeexplicitautogradnonfunctional_dispatch.h>
219#include <ATen/ops/asin_native.h>
220#include <ATen/ops/asinh.h>
221#include <ATen/ops/asinh_compositeexplicitautogradnonfunctional_dispatch.h>
222#include <ATen/ops/asinh_native.h>
223#include <ATen/ops/atan.h>
224#include <ATen/ops/atan2.h>
225#include <ATen/ops/atan2_compositeexplicitautogradnonfunctional_dispatch.h>
226#include <ATen/ops/atan2_native.h>
227#include <ATen/ops/atan_compositeexplicitautogradnonfunctional_dispatch.h>
228#include <ATen/ops/atan_native.h>
229#include <ATen/ops/atanh.h>
230#include <ATen/ops/atanh_compositeexplicitautogradnonfunctional_dispatch.h>
231#include <ATen/ops/atanh_native.h>
232#include <ATen/ops/avg_pool2d.h>
233#include <ATen/ops/avg_pool2d_backward.h>
234#include <ATen/ops/avg_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
235#include <ATen/ops/avg_pool2d_backward_native.h>
236#include <ATen/ops/avg_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
237#include <ATen/ops/avg_pool2d_native.h>
238#include <ATen/ops/avg_pool3d.h>
239#include <ATen/ops/avg_pool3d_backward.h>
240#include <ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
241#include <ATen/ops/avg_pool3d_backward_native.h>
242#include <ATen/ops/avg_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
243#include <ATen/ops/avg_pool3d_native.h>
244#include <ATen/ops/baddbmm.h>
245#include <ATen/ops/baddbmm_compositeexplicitautogradnonfunctional_dispatch.h>
246#include <ATen/ops/baddbmm_native.h>
247#include <ATen/ops/bernoulli.h>
248#include <ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h>
249#include <ATen/ops/bernoulli_native.h>
250#include <ATen/ops/bitwise_and.h>
251#include <ATen/ops/bitwise_and_compositeexplicitautogradnonfunctional_dispatch.h>
252#include <ATen/ops/bitwise_and_native.h>
253#include <ATen/ops/bitwise_left_shift.h>
254#include <ATen/ops/bitwise_left_shift_compositeexplicitautogradnonfunctional_dispatch.h>
255#include <ATen/ops/bitwise_left_shift_native.h>
256#include <ATen/ops/bitwise_not.h>
257#include <ATen/ops/bitwise_not_compositeexplicitautogradnonfunctional_dispatch.h>
258#include <ATen/ops/bitwise_not_native.h>
259#include <ATen/ops/bitwise_or.h>
260#include <ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h>
261#include <ATen/ops/bitwise_or_native.h>
262#include <ATen/ops/bitwise_right_shift.h>
263#include <ATen/ops/bitwise_right_shift_compositeexplicitautogradnonfunctional_dispatch.h>
264#include <ATen/ops/bitwise_right_shift_native.h>
265#include <ATen/ops/bitwise_xor.h>
266#include <ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h>
267#include <ATen/ops/bitwise_xor_native.h>
268#include <ATen/ops/bmm.h>
269#include <ATen/ops/bmm_compositeexplicitautogradnonfunctional_dispatch.h>
270#include <ATen/ops/bmm_native.h>
271#include <ATen/ops/cat.h>
272#include <ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h>
273#include <ATen/ops/cat_native.h>
274#include <ATen/ops/ccol_indices_copy.h>
275#include <ATen/ops/ccol_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
276#include <ATen/ops/ccol_indices_copy_native.h>
277#include <ATen/ops/ceil.h>
278#include <ATen/ops/ceil_compositeexplicitautogradnonfunctional_dispatch.h>
279#include <ATen/ops/ceil_native.h>
280#include <ATen/ops/clamp.h>
281#include <ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h>
282#include <ATen/ops/clamp_max.h>
283#include <ATen/ops/clamp_max_compositeexplicitautogradnonfunctional_dispatch.h>
284#include <ATen/ops/clamp_max_native.h>
285#include <ATen/ops/clamp_min.h>
286#include <ATen/ops/clamp_min_compositeexplicitautogradnonfunctional_dispatch.h>
287#include <ATen/ops/clamp_min_native.h>
288#include <ATen/ops/clamp_native.h>
289#include <ATen/ops/col_indices_copy.h>
290#include <ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
291#include <ATen/ops/col_indices_copy_native.h>
292#include <ATen/ops/copy.h>
293#include <ATen/ops/copy_compositeexplicitautogradnonfunctional_dispatch.h>
294#include <ATen/ops/copy_native.h>
295#include <ATen/ops/copysign.h>
296#include <ATen/ops/copysign_compositeexplicitautogradnonfunctional_dispatch.h>
297#include <ATen/ops/copysign_native.h>
298#include <ATen/ops/cos.h>
299#include <ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h>
300#include <ATen/ops/cos_native.h>
301#include <ATen/ops/cosh.h>
302#include <ATen/ops/cosh_compositeexplicitautogradnonfunctional_dispatch.h>
303#include <ATen/ops/cosh_native.h>
304#include <ATen/ops/crow_indices_copy.h>
305#include <ATen/ops/crow_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
306#include <ATen/ops/crow_indices_copy_native.h>
307#include <ATen/ops/cumprod.h>
308#include <ATen/ops/cumprod_compositeexplicitautogradnonfunctional_dispatch.h>
309#include <ATen/ops/cumprod_native.h>
310#include <ATen/ops/cumsum.h>
311#include <ATen/ops/cumsum_compositeexplicitautogradnonfunctional_dispatch.h>
312#include <ATen/ops/cumsum_native.h>
313#include <ATen/ops/detach_copy.h>
314#include <ATen/ops/detach_copy_compositeexplicitautogradnonfunctional_dispatch.h>
315#include <ATen/ops/detach_copy_native.h>
316#include <ATen/ops/diag_embed.h>
317#include <ATen/ops/diag_embed_compositeexplicitautogradnonfunctional_dispatch.h>
318#include <ATen/ops/diag_embed_native.h>
319#include <ATen/ops/diagonal_copy.h>
320#include <ATen/ops/diagonal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
321#include <ATen/ops/diagonal_copy_native.h>
322#include <ATen/ops/digamma.h>
323#include <ATen/ops/digamma_compositeexplicitautogradnonfunctional_dispatch.h>
324#include <ATen/ops/digamma_native.h>
325#include <ATen/ops/div.h>
326#include <ATen/ops/div_compositeexplicitautogradnonfunctional_dispatch.h>
327#include <ATen/ops/div_native.h>
328#include <ATen/ops/elu.h>
329#include <ATen/ops/elu_backward.h>
330#include <ATen/ops/elu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
331#include <ATen/ops/elu_backward_native.h>
332#include <ATen/ops/elu_compositeexplicitautogradnonfunctional_dispatch.h>
333#include <ATen/ops/elu_native.h>
334#include <ATen/ops/eq.h>
335#include <ATen/ops/eq_compositeexplicitautogradnonfunctional_dispatch.h>
336#include <ATen/ops/eq_native.h>
337#include <ATen/ops/erf.h>
338#include <ATen/ops/erf_compositeexplicitautogradnonfunctional_dispatch.h>
339#include <ATen/ops/erf_native.h>
340#include <ATen/ops/erfc.h>
341#include <ATen/ops/erfc_compositeexplicitautogradnonfunctional_dispatch.h>
342#include <ATen/ops/erfc_native.h>
343#include <ATen/ops/erfinv.h>
344#include <ATen/ops/erfinv_compositeexplicitautogradnonfunctional_dispatch.h>
345#include <ATen/ops/erfinv_native.h>
346#include <ATen/ops/exp.h>
347#include <ATen/ops/exp2.h>
348#include <ATen/ops/exp2_compositeexplicitautogradnonfunctional_dispatch.h>
349#include <ATen/ops/exp2_native.h>
350#include <ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h>
351#include <ATen/ops/exp_native.h>
352#include <ATen/ops/expand_copy.h>
353#include <ATen/ops/expand_copy_compositeexplicitautogradnonfunctional_dispatch.h>
354#include <ATen/ops/expand_copy_native.h>
355#include <ATen/ops/expm1.h>
356#include <ATen/ops/expm1_compositeexplicitautogradnonfunctional_dispatch.h>
357#include <ATen/ops/expm1_native.h>
358#include <ATen/ops/floor.h>
359#include <ATen/ops/floor_compositeexplicitautogradnonfunctional_dispatch.h>
360#include <ATen/ops/floor_native.h>
361#include <ATen/ops/fmax.h>
362#include <ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h>
363#include <ATen/ops/fmax_native.h>
364#include <ATen/ops/fmin.h>
365#include <ATen/ops/fmin_compositeexplicitautogradnonfunctional_dispatch.h>
366#include <ATen/ops/fmin_native.h>
367#include <ATen/ops/fmod.h>
368#include <ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h>
369#include <ATen/ops/fmod_native.h>
370#include <ATen/ops/frac.h>
371#include <ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h>
372#include <ATen/ops/frac_native.h>
373#include <ATen/ops/fractional_max_pool2d.h>
374#include <ATen/ops/fractional_max_pool2d_backward.h>
375#include <ATen/ops/fractional_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
376#include <ATen/ops/fractional_max_pool2d_backward_native.h>
377#include <ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
378#include <ATen/ops/fractional_max_pool2d_native.h>
379#include <ATen/ops/fractional_max_pool3d.h>
380#include <ATen/ops/fractional_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
381#include <ATen/ops/fractional_max_pool3d_native.h>
382#include <ATen/ops/gather.h>
383#include <ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h>
384#include <ATen/ops/gather_native.h>
385#include <ATen/ops/gcd.h>
386#include <ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h>
387#include <ATen/ops/gcd_native.h>
388#include <ATen/ops/ge.h>
389#include <ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h>
390#include <ATen/ops/ge_native.h>
391#include <ATen/ops/gelu.h>
392#include <ATen/ops/gelu_backward.h>
393#include <ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
394#include <ATen/ops/gelu_backward_native.h>
395#include <ATen/ops/gelu_compositeexplicitautogradnonfunctional_dispatch.h>
396#include <ATen/ops/gelu_native.h>
397#include <ATen/ops/glu.h>
398#include <ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h>
399#include <ATen/ops/glu_native.h>
400#include <ATen/ops/gt.h>
401#include <ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h>
402#include <ATen/ops/gt_native.h>
403#include <ATen/ops/hardshrink.h>
404#include <ATen/ops/hardshrink_backward.h>
405#include <ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
406#include <ATen/ops/hardshrink_backward_native.h>
407#include <ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h>
408#include <ATen/ops/hardshrink_native.h>
409#include <ATen/ops/hardsigmoid.h>
410#include <ATen/ops/hardsigmoid_backward.h>
411#include <ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
412#include <ATen/ops/hardsigmoid_backward_native.h>
413#include <ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
414#include <ATen/ops/hardsigmoid_native.h>
415#include <ATen/ops/heaviside.h>
416#include <ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h>
417#include <ATen/ops/heaviside_native.h>
418#include <ATen/ops/hypot.h>
419#include <ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h>
420#include <ATen/ops/hypot_native.h>
421#include <ATen/ops/i0.h>
422#include <ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h>
423#include <ATen/ops/i0_native.h>
424#include <ATen/ops/igamma.h>
425#include <ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h>
426#include <ATen/ops/igamma_native.h>
427#include <ATen/ops/igammac.h>
428#include <ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h>
429#include <ATen/ops/igammac_native.h>
430#include <ATen/ops/index.h>
431#include <ATen/ops/index_add.h>
432#include <ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h>
433#include <ATen/ops/index_add_native.h>
434#include <ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h>
435#include <ATen/ops/index_copy.h>
436#include <ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h>
437#include <ATen/ops/index_copy_native.h>
438#include <ATen/ops/index_native.h>
439#include <ATen/ops/index_reduce.h>
440#include <ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
441#include <ATen/ops/index_reduce_native.h>
442#include <ATen/ops/indices_copy.h>
443#include <ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
444#include <ATen/ops/indices_copy_native.h>
445#include <ATen/ops/isin.h>
446#include <ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h>
447#include <ATen/ops/isin_native.h>
448#include <ATen/ops/isneginf.h>
449#include <ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h>
450#include <ATen/ops/isneginf_native.h>
451#include <ATen/ops/isposinf.h>
452#include <ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h>
453#include <ATen/ops/isposinf_native.h>
454#include <ATen/ops/lcm.h>
455#include <ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h>
456#include <ATen/ops/lcm_native.h>
457#include <ATen/ops/le.h>
458#include <ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h>
459#include <ATen/ops/le_native.h>
460#include <ATen/ops/leaky_relu.h>
461#include <ATen/ops/leaky_relu_backward.h>
462#include <ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
463#include <ATen/ops/leaky_relu_backward_native.h>
464#include <ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h>
465#include <ATen/ops/leaky_relu_native.h>
466#include <ATen/ops/lerp.h>
467#include <ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h>
468#include <ATen/ops/lerp_native.h>
469#include <ATen/ops/lgamma.h>
470#include <ATen/ops/lgamma_compositeexplicitautogradnonfunctional_dispatch.h>
471#include <ATen/ops/lgamma_native.h>
472#include <ATen/ops/lift_fresh_copy.h>
473#include <ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h>
474#include <ATen/ops/lift_fresh_copy_native.h>
475#include <ATen/ops/linalg_cholesky_ex.h>
476#include <ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h>
477#include <ATen/ops/linalg_cholesky_ex_native.h>
478#include <ATen/ops/linalg_cross.h>
479#include <ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h>
480#include <ATen/ops/linalg_cross_native.h>
481#include <ATen/ops/linalg_inv_ex.h>
482#include <ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h>
483#include <ATen/ops/linalg_inv_ex_native.h>
484#include <ATen/ops/linalg_ldl_factor_ex.h>
485#include <ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
486#include <ATen/ops/linalg_ldl_factor_ex_native.h>
487#include <ATen/ops/linalg_ldl_solve.h>
488#include <ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h>
489#include <ATen/ops/linalg_ldl_solve_native.h>
490#include <ATen/ops/linalg_lu.h>
491#include <ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h>
492#include <ATen/ops/linalg_lu_factor_ex.h>
493#include <ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
494#include <ATen/ops/linalg_lu_factor_ex_native.h>
495#include <ATen/ops/linalg_lu_native.h>
496#include <ATen/ops/linalg_lu_solve.h>
497#include <ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h>
498#include <ATen/ops/linalg_lu_solve_native.h>
499#include <ATen/ops/linalg_pinv.h>
500#include <ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h>
501#include <ATen/ops/linalg_pinv_native.h>
502#include <ATen/ops/linalg_qr.h>
503#include <ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h>
504#include <ATen/ops/linalg_qr_native.h>
505#include <ATen/ops/linalg_vector_norm.h>
506#include <ATen/ops/linalg_vector_norm_compositeexplicitautogradnonfunctional_dispatch.h>
507#include <ATen/ops/linalg_vector_norm_native.h>
508#include <ATen/ops/log.h>
509#include <ATen/ops/log10.h>
510#include <ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h>
511#include <ATen/ops/log10_native.h>
512#include <ATen/ops/log1p.h>
513#include <ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h>
514#include <ATen/ops/log1p_native.h>
515#include <ATen/ops/log2.h>
516#include <ATen/ops/log2_compositeexplicitautogradnonfunctional_dispatch.h>
517#include <ATen/ops/log2_native.h>
518#include <ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h>
519#include <ATen/ops/log_native.h>
520#include <ATen/ops/logaddexp.h>
521#include <ATen/ops/logaddexp2.h>
522#include <ATen/ops/logaddexp2_compositeexplicitautogradnonfunctional_dispatch.h>
523#include <ATen/ops/logaddexp2_native.h>
524#include <ATen/ops/logaddexp_compositeexplicitautogradnonfunctional_dispatch.h>
525#include <ATen/ops/logaddexp_native.h>
526#include <ATen/ops/logit_backward.h>
527#include <ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h>
528#include <ATen/ops/logit_backward_native.h>
529#include <ATen/ops/logsumexp.h>
530#include <ATen/ops/logsumexp_compositeexplicitautogradnonfunctional_dispatch.h>
531#include <ATen/ops/logsumexp_native.h>
532#include <ATen/ops/lt.h>
533#include <ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h>
534#include <ATen/ops/lt_native.h>
535#include <ATen/ops/lu_unpack.h>
536#include <ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h>
537#include <ATen/ops/lu_unpack_native.h>
538#include <ATen/ops/max.h>
539#include <ATen/ops/max_compositeexplicitautogradnonfunctional_dispatch.h>
540#include <ATen/ops/max_native.h>
541#include <ATen/ops/max_pool2d_with_indices.h>
542#include <ATen/ops/max_pool2d_with_indices_backward.h>
543#include <ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h>
544#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
545#include <ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h>
546#include <ATen/ops/max_pool2d_with_indices_native.h>
547#include <ATen/ops/maximum.h>
548#include <ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h>
549#include <ATen/ops/maximum_native.h>
550#include <ATen/ops/mean.h>
551#include <ATen/ops/mean_compositeexplicitautogradnonfunctional_dispatch.h>
552#include <ATen/ops/mean_native.h>
553#include <ATen/ops/min.h>
554#include <ATen/ops/min_compositeexplicitautogradnonfunctional_dispatch.h>
555#include <ATen/ops/min_native.h>
556#include <ATen/ops/minimum.h>
557#include <ATen/ops/minimum_compositeexplicitautogradnonfunctional_dispatch.h>
558#include <ATen/ops/minimum_native.h>
559#include <ATen/ops/mish.h>
560#include <ATen/ops/mish_compositeexplicitautogradnonfunctional_dispatch.h>
561#include <ATen/ops/mish_native.h>
562#include <ATen/ops/mm.h>
563#include <ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h>
564#include <ATen/ops/mm_native.h>
565#include <ATen/ops/mse_loss.h>
566#include <ATen/ops/mse_loss_compositeexplicitautogradnonfunctional_dispatch.h>
567#include <ATen/ops/mse_loss_native.h>
568#include <ATen/ops/mul.h>
569#include <ATen/ops/mul_compositeexplicitautogradnonfunctional_dispatch.h>
570#include <ATen/ops/mul_native.h>
571#include <ATen/ops/narrow_copy.h>
572#include <ATen/ops/narrow_copy_compositeexplicitautogradnonfunctional_dispatch.h>
573#include <ATen/ops/narrow_copy_native.h>
574#include <ATen/ops/ne.h>
575#include <ATen/ops/ne_compositeexplicitautogradnonfunctional_dispatch.h>
576#include <ATen/ops/ne_native.h>
577#include <ATen/ops/neg.h>
578#include <ATen/ops/neg_compositeexplicitautogradnonfunctional_dispatch.h>
579#include <ATen/ops/neg_native.h>
580#include <ATen/ops/new_empty_strided.h>
581#include <ATen/ops/new_empty_strided_compositeexplicitautogradnonfunctional_dispatch.h>
582#include <ATen/ops/new_empty_strided_native.h>
583#include <ATen/ops/nextafter.h>
584#include <ATen/ops/nextafter_compositeexplicitautogradnonfunctional_dispatch.h>
585#include <ATen/ops/nextafter_native.h>
586#include <ATen/ops/nll_loss_backward.h>
587#include <ATen/ops/nll_loss_backward_compositeexplicitautogradnonfunctional_dispatch.h>
588#include <ATen/ops/nll_loss_backward_native.h>
589#include <ATen/ops/nll_loss_forward.h>
590#include <ATen/ops/nll_loss_forward_compositeexplicitautogradnonfunctional_dispatch.h>
591#include <ATen/ops/nll_loss_forward_native.h>
592#include <ATen/ops/norm.h>
593#include <ATen/ops/norm_compositeexplicitautogradnonfunctional_dispatch.h>
594#include <ATen/ops/norm_native.h>
595#include <ATen/ops/permute_copy.h>
596#include <ATen/ops/permute_copy_compositeexplicitautogradnonfunctional_dispatch.h>
597#include <ATen/ops/permute_copy_native.h>
598#include <ATen/ops/pixel_shuffle.h>
599#include <ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h>
600#include <ATen/ops/pixel_shuffle_native.h>
601#include <ATen/ops/pixel_unshuffle.h>
602#include <ATen/ops/pixel_unshuffle_compositeexplicitautogradnonfunctional_dispatch.h>
603#include <ATen/ops/pixel_unshuffle_native.h>
604#include <ATen/ops/polygamma.h>
605#include <ATen/ops/polygamma_compositeexplicitautogradnonfunctional_dispatch.h>
606#include <ATen/ops/polygamma_native.h>
607#include <ATen/ops/pow.h>
608#include <ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h>
609#include <ATen/ops/pow_native.h>
610#include <ATen/ops/prod.h>
611#include <ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h>
612#include <ATen/ops/prod_native.h>
613#include <ATen/ops/reciprocal.h>
614#include <ATen/ops/reciprocal_compositeexplicitautogradnonfunctional_dispatch.h>
615#include <ATen/ops/reciprocal_native.h>
616#include <ATen/ops/reflection_pad1d.h>
617#include <ATen/ops/reflection_pad1d_backward.h>
618#include <ATen/ops/reflection_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
619#include <ATen/ops/reflection_pad1d_backward_native.h>
620#include <ATen/ops/reflection_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
621#include <ATen/ops/reflection_pad1d_native.h>
622#include <ATen/ops/reflection_pad3d.h>
623#include <ATen/ops/reflection_pad3d_backward.h>
624#include <ATen/ops/reflection_pad3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
625#include <ATen/ops/reflection_pad3d_backward_native.h>
626#include <ATen/ops/reflection_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
627#include <ATen/ops/reflection_pad3d_native.h>
628#include <ATen/ops/remainder.h>
629#include <ATen/ops/remainder_compositeexplicitautogradnonfunctional_dispatch.h>
630#include <ATen/ops/remainder_native.h>
631#include <ATen/ops/renorm.h>
632#include <ATen/ops/renorm_compositeexplicitautogradnonfunctional_dispatch.h>
633#include <ATen/ops/renorm_native.h>
634#include <ATen/ops/replication_pad1d.h>
635#include <ATen/ops/replication_pad1d_backward.h>
636#include <ATen/ops/replication_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
637#include <ATen/ops/replication_pad1d_backward_native.h>
638#include <ATen/ops/replication_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
639#include <ATen/ops/replication_pad1d_native.h>
640#include <ATen/ops/replication_pad2d.h>
641#include <ATen/ops/replication_pad2d_compositeexplicitautogradnonfunctional_dispatch.h>
642#include <ATen/ops/replication_pad2d_native.h>
643#include <ATen/ops/replication_pad3d.h>
644#include <ATen/ops/replication_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
645#include <ATen/ops/replication_pad3d_native.h>
646#include <ATen/ops/round.h>
647#include <ATen/ops/round_compositeexplicitautogradnonfunctional_dispatch.h>
648#include <ATen/ops/round_native.h>
649#include <ATen/ops/row_indices_copy.h>
650#include <ATen/ops/row_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
651#include <ATen/ops/row_indices_copy_native.h>
652#include <ATen/ops/rsqrt.h>
653#include <ATen/ops/rsqrt_compositeexplicitautogradnonfunctional_dispatch.h>
654#include <ATen/ops/rsqrt_native.h>
655#include <ATen/ops/scatter.h>
656#include <ATen/ops/scatter_add.h>
657#include <ATen/ops/scatter_add_compositeexplicitautogradnonfunctional_dispatch.h>
658#include <ATen/ops/scatter_add_native.h>
659#include <ATen/ops/scatter_compositeexplicitautogradnonfunctional_dispatch.h>
660#include <ATen/ops/scatter_native.h>
661#include <ATen/ops/scatter_reduce.h>
662#include <ATen/ops/scatter_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
663#include <ATen/ops/scatter_reduce_native.h>
664#include <ATen/ops/select_backward.h>
665#include <ATen/ops/select_backward_compositeexplicitautogradnonfunctional_dispatch.h>
666#include <ATen/ops/select_backward_native.h>
667#include <ATen/ops/select_copy.h>
668#include <ATen/ops/select_copy_compositeexplicitautogradnonfunctional_dispatch.h>
669#include <ATen/ops/select_copy_native.h>
670#include <ATen/ops/sgn.h>
671#include <ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h>
672#include <ATen/ops/sgn_native.h>
673#include <ATen/ops/sigmoid.h>
674#include <ATen/ops/sigmoid_backward.h>
675#include <ATen/ops/sigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
676#include <ATen/ops/sigmoid_backward_native.h>
677#include <ATen/ops/sigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
678#include <ATen/ops/sigmoid_native.h>
679#include <ATen/ops/sign.h>
680#include <ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h>
681#include <ATen/ops/sign_native.h>
682#include <ATen/ops/signbit.h>
683#include <ATen/ops/signbit_compositeexplicitautogradnonfunctional_dispatch.h>
684#include <ATen/ops/signbit_native.h>
685#include <ATen/ops/silu.h>
686#include <ATen/ops/silu_backward.h>
687#include <ATen/ops/silu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
688#include <ATen/ops/silu_backward_native.h>
689#include <ATen/ops/silu_compositeexplicitautogradnonfunctional_dispatch.h>
690#include <ATen/ops/silu_native.h>
691#include <ATen/ops/sin.h>
692#include <ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h>
693#include <ATen/ops/sin_native.h>
694#include <ATen/ops/sinc.h>
695#include <ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h>
696#include <ATen/ops/sinc_native.h>
697#include <ATen/ops/sinh.h>
698#include <ATen/ops/sinh_compositeexplicitautogradnonfunctional_dispatch.h>
699#include <ATen/ops/sinh_native.h>
700#include <ATen/ops/slice_copy.h>
701#include <ATen/ops/slice_copy_compositeexplicitautogradnonfunctional_dispatch.h>
702#include <ATen/ops/slice_copy_native.h>
703#include <ATen/ops/slow_conv_transpose2d.h>
704#include <ATen/ops/slow_conv_transpose2d_compositeexplicitautogradnonfunctional_dispatch.h>
705#include <ATen/ops/slow_conv_transpose2d_native.h>
706#include <ATen/ops/smooth_l1_loss.h>
707#include <ATen/ops/smooth_l1_loss_compositeexplicitautogradnonfunctional_dispatch.h>
708#include <ATen/ops/smooth_l1_loss_native.h>
709#include <ATen/ops/softplus.h>
710#include <ATen/ops/softplus_backward.h>
711#include <ATen/ops/softplus_backward_compositeexplicitautogradnonfunctional_dispatch.h>
712#include <ATen/ops/softplus_backward_native.h>
713#include <ATen/ops/softplus_compositeexplicitautogradnonfunctional_dispatch.h>
714#include <ATen/ops/softplus_native.h>
715#include <ATen/ops/softshrink.h>
716#include <ATen/ops/softshrink_backward.h>
717#include <ATen/ops/softshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
718#include <ATen/ops/softshrink_backward_native.h>
719#include <ATen/ops/softshrink_compositeexplicitautogradnonfunctional_dispatch.h>
720#include <ATen/ops/softshrink_native.h>
721#include <ATen/ops/sort.h>
722#include <ATen/ops/sort_compositeexplicitautogradnonfunctional_dispatch.h>
723#include <ATen/ops/sort_native.h>
724#include <ATen/ops/special_airy_ai.h>
725#include <ATen/ops/special_airy_ai_compositeexplicitautogradnonfunctional_dispatch.h>
726#include <ATen/ops/special_airy_ai_native.h>
727#include <ATen/ops/special_bessel_j0.h>
728#include <ATen/ops/special_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
729#include <ATen/ops/special_bessel_j0_native.h>
730#include <ATen/ops/special_bessel_j1.h>
731#include <ATen/ops/special_bessel_j1_compositeexplicitautogradnonfunctional_dispatch.h>
732#include <ATen/ops/special_bessel_j1_native.h>
733#include <ATen/ops/special_bessel_y0.h>
734#include <ATen/ops/special_bessel_y0_compositeexplicitautogradnonfunctional_dispatch.h>
735#include <ATen/ops/special_bessel_y0_native.h>
736#include <ATen/ops/special_bessel_y1.h>
737#include <ATen/ops/special_bessel_y1_compositeexplicitautogradnonfunctional_dispatch.h>
738#include <ATen/ops/special_bessel_y1_native.h>
739#include <ATen/ops/special_chebyshev_polynomial_t.h>
740#include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
741#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
742#include <ATen/ops/special_chebyshev_polynomial_u.h>
743#include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
744#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
745#include <ATen/ops/special_chebyshev_polynomial_v.h>
746#include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
747#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
748#include <ATen/ops/special_chebyshev_polynomial_w.h>
749#include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
750#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
751#include <ATen/ops/special_entr.h>
752#include <ATen/ops/special_entr_compositeexplicitautogradnonfunctional_dispatch.h>
753#include <ATen/ops/special_entr_native.h>
754#include <ATen/ops/special_erfcx.h>
755#include <ATen/ops/special_erfcx_compositeexplicitautogradnonfunctional_dispatch.h>
756#include <ATen/ops/special_erfcx_native.h>
757#include <ATen/ops/special_hermite_polynomial_h.h>
758#include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h>
759#include <ATen/ops/special_hermite_polynomial_h_native.h>
760#include <ATen/ops/special_hermite_polynomial_he.h>
761#include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautogradnonfunctional_dispatch.h>
762#include <ATen/ops/special_hermite_polynomial_he_native.h>
763#include <ATen/ops/special_i0e.h>
764#include <ATen/ops/special_i0e_compositeexplicitautogradnonfunctional_dispatch.h>
765#include <ATen/ops/special_i0e_native.h>
766#include <ATen/ops/special_i1.h>
767#include <ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h>
768#include <ATen/ops/special_i1_native.h>
769#include <ATen/ops/special_i1e.h>
770#include <ATen/ops/special_i1e_compositeexplicitautogradnonfunctional_dispatch.h>
771#include <ATen/ops/special_i1e_native.h>
772#include <ATen/ops/special_laguerre_polynomial_l.h>
773#include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautogradnonfunctional_dispatch.h>
774#include <ATen/ops/special_laguerre_polynomial_l_native.h>
775#include <ATen/ops/special_legendre_polynomial_p.h>
776#include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautogradnonfunctional_dispatch.h>
777#include <ATen/ops/special_legendre_polynomial_p_native.h>
778#include <ATen/ops/special_log_ndtr.h>
779#include <ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h>
780#include <ATen/ops/special_log_ndtr_native.h>
781#include <ATen/ops/special_modified_bessel_i0.h>
782#include <ATen/ops/special_modified_bessel_i0_compositeexplicitautogradnonfunctional_dispatch.h>
783#include <ATen/ops/special_modified_bessel_i0_native.h>
784#include <ATen/ops/special_modified_bessel_i1.h>
785#include <ATen/ops/special_modified_bessel_i1_compositeexplicitautogradnonfunctional_dispatch.h>
786#include <ATen/ops/special_modified_bessel_i1_native.h>
787#include <ATen/ops/special_modified_bessel_k0.h>
788#include <ATen/ops/special_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
789#include <ATen/ops/special_modified_bessel_k0_native.h>
790#include <ATen/ops/special_modified_bessel_k1.h>
791#include <ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
792#include <ATen/ops/special_modified_bessel_k1_native.h>
793#include <ATen/ops/special_ndtri.h>
794#include <ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h>
795#include <ATen/ops/special_ndtri_native.h>
796#include <ATen/ops/special_scaled_modified_bessel_k0.h>
797#include <ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
798#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
799#include <ATen/ops/special_scaled_modified_bessel_k1.h>
800#include <ATen/ops/special_scaled_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
801#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
802#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
803#include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
804#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
805#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
806#include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
807#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
808#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
809#include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
810#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
811#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
812#include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
813#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
814#include <ATen/ops/special_spherical_bessel_j0.h>
815#include <ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
816#include <ATen/ops/special_spherical_bessel_j0_native.h>
817#include <ATen/ops/special_xlog1py.h>
818#include <ATen/ops/special_xlog1py_compositeexplicitautogradnonfunctional_dispatch.h>
819#include <ATen/ops/special_xlog1py_native.h>
820#include <ATen/ops/special_zeta.h>
821#include <ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h>
822#include <ATen/ops/special_zeta_native.h>
823#include <ATen/ops/split_copy.h>
824#include <ATen/ops/split_copy_compositeexplicitautogradnonfunctional_dispatch.h>
825#include <ATen/ops/split_copy_native.h>
826#include <ATen/ops/split_with_sizes_copy.h>
827#include <ATen/ops/split_with_sizes_copy_compositeexplicitautogradnonfunctional_dispatch.h>
828#include <ATen/ops/split_with_sizes_copy_native.h>
829#include <ATen/ops/sqrt.h>
830#include <ATen/ops/sqrt_compositeexplicitautogradnonfunctional_dispatch.h>
831#include <ATen/ops/sqrt_native.h>
832#include <ATen/ops/squeeze_copy.h>
833#include <ATen/ops/squeeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
834#include <ATen/ops/squeeze_copy_native.h>
835#include <ATen/ops/sub.h>
836#include <ATen/ops/sub_compositeexplicitautogradnonfunctional_dispatch.h>
837#include <ATen/ops/sub_native.h>
838#include <ATen/ops/sum.h>
839#include <ATen/ops/sum_compositeexplicitautogradnonfunctional_dispatch.h>
840#include <ATen/ops/sum_native.h>
841#include <ATen/ops/t_copy.h>
842#include <ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h>
843#include <ATen/ops/t_copy_native.h>
844#include <ATen/ops/tan.h>
845#include <ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h>
846#include <ATen/ops/tan_native.h>
847#include <ATen/ops/tanh.h>
848#include <ATen/ops/tanh_backward.h>
849#include <ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h>
850#include <ATen/ops/tanh_backward_native.h>
851#include <ATen/ops/tanh_compositeexplicitautogradnonfunctional_dispatch.h>
852#include <ATen/ops/tanh_native.h>
853#include <ATen/ops/threshold.h>
854#include <ATen/ops/threshold_backward.h>
855#include <ATen/ops/threshold_backward_compositeexplicitautogradnonfunctional_dispatch.h>
856#include <ATen/ops/threshold_backward_native.h>
857#include <ATen/ops/threshold_compositeexplicitautogradnonfunctional_dispatch.h>
858#include <ATen/ops/threshold_native.h>
859#include <ATen/ops/topk.h>
860#include <ATen/ops/topk_compositeexplicitautogradnonfunctional_dispatch.h>
861#include <ATen/ops/topk_native.h>
862#include <ATen/ops/transpose_copy.h>
863#include <ATen/ops/transpose_copy_compositeexplicitautogradnonfunctional_dispatch.h>
864#include <ATen/ops/transpose_copy_native.h>
865#include <ATen/ops/triangular_solve.h>
866#include <ATen/ops/triangular_solve_compositeexplicitautogradnonfunctional_dispatch.h>
867#include <ATen/ops/triangular_solve_native.h>
868#include <ATen/ops/tril.h>
869#include <ATen/ops/tril_compositeexplicitautogradnonfunctional_dispatch.h>
870#include <ATen/ops/tril_native.h>
871#include <ATen/ops/triu.h>
872#include <ATen/ops/triu_compositeexplicitautogradnonfunctional_dispatch.h>
873#include <ATen/ops/triu_native.h>
874#include <ATen/ops/trunc.h>
875#include <ATen/ops/trunc_compositeexplicitautogradnonfunctional_dispatch.h>
876#include <ATen/ops/trunc_native.h>
877#include <ATen/ops/unbind_copy.h>
878#include <ATen/ops/unbind_copy_compositeexplicitautogradnonfunctional_dispatch.h>
879#include <ATen/ops/unbind_copy_native.h>
880#include <ATen/ops/unfold_copy.h>
881#include <ATen/ops/unfold_copy_compositeexplicitautogradnonfunctional_dispatch.h>
882#include <ATen/ops/unfold_copy_native.h>
883#include <ATen/ops/unsqueeze_copy.h>
884#include <ATen/ops/unsqueeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
885#include <ATen/ops/unsqueeze_copy_native.h>
886#include <ATen/ops/upsample_bicubic2d.h>
887#include <ATen/ops/upsample_bicubic2d_backward.h>
888#include <ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
889#include <ATen/ops/upsample_bicubic2d_backward_native.h>
890#include <ATen/ops/upsample_bicubic2d_compositeexplicitautogradnonfunctional_dispatch.h>
891#include <ATen/ops/upsample_bicubic2d_native.h>
892#include <ATen/ops/upsample_bilinear2d.h>
893#include <ATen/ops/upsample_bilinear2d_backward.h>
894#include <ATen/ops/upsample_bilinear2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
895#include <ATen/ops/upsample_bilinear2d_backward_native.h>
896#include <ATen/ops/upsample_bilinear2d_compositeexplicitautogradnonfunctional_dispatch.h>
897#include <ATen/ops/upsample_bilinear2d_native.h>
898#include <ATen/ops/upsample_linear1d.h>
899#include <ATen/ops/upsample_linear1d_backward.h>
900#include <ATen/ops/upsample_linear1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
901#include <ATen/ops/upsample_linear1d_backward_native.h>
902#include <ATen/ops/upsample_linear1d_compositeexplicitautogradnonfunctional_dispatch.h>
903#include <ATen/ops/upsample_linear1d_native.h>
904#include <ATen/ops/upsample_nearest1d.h>
905#include <ATen/ops/upsample_nearest1d_backward.h>
906#include <ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
907#include <ATen/ops/upsample_nearest1d_backward_native.h>
908#include <ATen/ops/upsample_nearest1d_compositeexplicitautogradnonfunctional_dispatch.h>
909#include <ATen/ops/upsample_nearest1d_native.h>
910#include <ATen/ops/upsample_nearest2d.h>
911#include <ATen/ops/upsample_nearest2d_backward.h>
912#include <ATen/ops/upsample_nearest2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
913#include <ATen/ops/upsample_nearest2d_backward_native.h>
914#include <ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h>
915#include <ATen/ops/upsample_nearest2d_native.h>
916#include <ATen/ops/upsample_nearest3d.h>
917#include <ATen/ops/upsample_nearest3d_backward.h>
918#include <ATen/ops/upsample_nearest3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
919#include <ATen/ops/upsample_nearest3d_backward_native.h>
920#include <ATen/ops/upsample_nearest3d_compositeexplicitautogradnonfunctional_dispatch.h>
921#include <ATen/ops/upsample_nearest3d_native.h>
922#include <ATen/ops/upsample_trilinear3d.h>
923#include <ATen/ops/upsample_trilinear3d_backward.h>
924#include <ATen/ops/upsample_trilinear3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
925#include <ATen/ops/upsample_trilinear3d_backward_native.h>
926#include <ATen/ops/upsample_trilinear3d_compositeexplicitautogradnonfunctional_dispatch.h>
927#include <ATen/ops/upsample_trilinear3d_native.h>
928#include <ATen/ops/values_copy.h>
929#include <ATen/ops/values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
930#include <ATen/ops/values_copy_native.h>
931#include <ATen/ops/view_as_complex_copy.h>
932#include <ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h>
933#include <ATen/ops/view_as_complex_copy_native.h>
934#include <ATen/ops/view_as_real_copy.h>
935#include <ATen/ops/view_as_real_copy_compositeexplicitautogradnonfunctional_dispatch.h>
936#include <ATen/ops/view_as_real_copy_native.h>
937#include <ATen/ops/view_copy.h>
938#include <ATen/ops/view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
939#include <ATen/ops/view_copy_native.h>
940#include <ATen/ops/xlogy.h>
941#include <ATen/ops/xlogy_compositeexplicitautogradnonfunctional_dispatch.h>
942#include <ATen/ops/xlogy_native.h>
943
944// See template file RegisterDispatchDefinitions.ini
945namespace at {
946// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
947// ambiguity with conflicting identifiers that may have been defined in
948// at namespace already.
949namespace {
950Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
951 if (strides.empty()) {
952 return at::empty(sizes, options);
953 } else {
954 return at::empty_strided(sizes, strides, options);
955 }
956}
957void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
958 // These checks are needed on those operators that:
959 // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
960 // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
961 // For other operators (e.g. 'add'), 'TensorIterator' already checks
962 // these things separately.
963 TORCH_CHECK(options.dtype() == self.dtype(),
964 "Bad in-place call: ",
965 "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
966 TORCH_CHECK(options.device() == self.device(),
967 "Bad in-place call: ",
968 "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
969 TORCH_CHECK(sizes == self.sizes(),
970 "Bad in-place call: ",
971 "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
972}
973c10::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
974 if (out.strides() != strides) {
975 return at::empty_strided(sizes, strides, options);
976 }
977 return c10::nullopt;
978}
979struct structured_sgn_default_backend_functional final : public at::meta::structured_sgn {
980 void set_output_strided(
981 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
982 TensorOptions options, DimnameList names
983 ) override {
984 auto current_device = guard_.current_device();
985 if (C10_UNLIKELY(current_device.has_value())) {
986 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
987 "structured kernels don't support multi-device outputs");
988 } else {
989 guard_.reset_device(options.device());
990 }
991 outputs_[output_idx] = create_out(sizes, strides, options);
992 if (!names.empty()) {
993 namedinference::propagate_names(*outputs_[output_idx], names);
994 }
995 // super must happen after, so that downstream can use maybe_get_output
996 // to retrieve the output
997 at::meta::structured_sgn::set_output_raw_strided(output_idx, sizes, strides, options, names);
998 }
999 void set_output_raw_strided(
1000 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1001 TensorOptions options, DimnameList names
1002 ) override {
1003 auto current_device = guard_.current_device();
1004 if (C10_UNLIKELY(current_device.has_value())) {
1005 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
1006 "structured kernels don't support multi-device outputs");
1007 } else {
1008 guard_.reset_device(options.device());
1009 }
1010 outputs_[output_idx] = create_out(sizes, strides, options);
1011 if (!names.empty()) {
1012 namedinference::propagate_names(*outputs_[output_idx], names);
1013 }
1014 // super must happen after, so that downstream can use maybe_get_output
1015 // to retrieve the output
1016 at::meta::structured_sgn::set_output_raw_strided(output_idx, sizes, strides, options, names);
1017 }
1018 const Tensor& maybe_get_output(int64_t output_idx) override {
1019 return *outputs_[output_idx];
1020 }
1021 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
1022 c10::OptionalDeviceGuard guard_;
1023};
1024at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sgn(const at::Tensor & self) {
1025structured_sgn_default_backend_functional op;
1026op.meta(self);
1027at::sgn_outf(self, *op.outputs_[0]);
1028return std::move(op.outputs_[0]).take();
1029}
struct structured_sgn_default_backend_inplace final : public at::meta::structured_sgn {
  structured_sgn_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_sgn::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_sgn::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
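// NOTE: the "*_inplace" structs hold a reference to `self` instead of owning
// an output. set_output_strided validates `self` with check_inplace and, if
// its strides differ from what the kernel wants, swaps in a proxy tensor from
// maybe_create_proxy; maybe_get_output then hands the kernel the proxy when
// one exists, so the kernel always writes with the layout it expects.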
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_sgn_(at::Tensor & self) {
structured_sgn_default_backend_inplace op(self);
op.meta(self);
at::sgn_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
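// NOTE: after the out= kernel runs, the in-place wrappers copy the proxy (if
// one was created) back into `self`, so e.g. a non-contiguous `self.sgn_()`
// still ends up with the correct values in the caller's original storage.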
struct structured_acos_default_backend_functional final : public at::meta::structured_acos {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_acos::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_acos::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_acos(const at::Tensor & self) {
structured_acos_default_backend_functional op;
op.meta(self);
at::acos_outf(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_acos_default_backend_inplace final : public at::meta::structured_acos {
  structured_acos_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_acos::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_acos::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_acos_(at::Tensor & self) {
structured_acos_default_backend_inplace op(self);
op.meta(self);
at::acos_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_add_Tensor_default_backend_functional final : public at::meta::structured_add_Tensor {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_add_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_add_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_add_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
structured_add_Tensor_default_backend_functional op;
op.meta(self, other, alpha);
at::add_outf(self, other, alpha, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_add_Tensor_default_backend_inplace final : public at::meta::structured_add_Tensor {
  structured_add_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_add_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_add_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_add__Tensor(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
structured_add_Tensor_default_backend_inplace op(self);
op.meta(self, other, alpha);
at::add_outf(self, other, alpha, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_addmv_default_backend_functional final : public at::meta::structured_addmv {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
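// NOTE: unlike the TensorIterator-based ops above (sgn, acos, add, ...), the
// set_output_* overrides for addmv and the reduction-style ops below end at
// the "super must happen after" comment without an actual super call: their
// meta classes have no set_output hook of their own to notify, so only the
// allocation and name-propagation steps remain.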
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
structured_addmv_default_backend_functional op;
op.meta(self, mat, vec, beta, alpha);
at::addmv_outf(self, mat, vec, beta, alpha, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_addmv_default_backend_inplace final : public at::meta::structured_addmv {
  structured_addmv_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
structured_addmv_default_backend_inplace op(self);
op.meta(self, mat, vec, beta, alpha);
at::addmv_outf(self, mat, vec, beta, alpha, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_all_dim_default_backend_functional final : public at::meta::structured_all_dim {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_all_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
structured_all_dim_default_backend_functional op;
auto precompute = op.meta(self, dim, keepdim);
(void)precompute;
at::all_outf(self, precompute.dim, keepdim, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
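// NOTE: some meta functions return a precompute struct. For all.dim the
// wrapper keeps the returned value and feeds precompute.dim (the dim already
// processed by the meta function) to the out= call, so the redispatch does
// not repeat that work; the (void)precompute cast merely silences
// unused-variable warnings in wrappers that consume no field.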
struct structured_any_dim_default_backend_functional final : public at::meta::structured_any_dim {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_any_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
structured_any_dim_default_backend_functional op;
auto precompute = op.meta(self, dim, keepdim);
(void)precompute;
at::any_outf(self, precompute.dim, keepdim, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_argmax_default_backend_functional final : public at::meta::structured_argmax {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_argmax(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
structured_argmax_default_backend_functional op;
op.meta(self, dim, keepdim);
at::argmax_outf(self, dim, keepdim, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_argmin_default_backend_functional final : public at::meta::structured_argmin {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_argmin(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
structured_argmin_default_backend_functional op;
op.meta(self, dim, keepdim);
at::argmin_outf(self, dim, keepdim, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
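// NOTE: all.dim, any.dim, argmax and argmin only get functional wrappers in
// this section; they have no in-place variant, so no *_inplace struct or
// proxy/copy-back machinery is generated for them.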
struct structured_acosh_default_backend_functional final : public at::meta::structured_acosh {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_acosh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_acosh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_acosh(const at::Tensor & self) {
structured_acosh_default_backend_functional op;
op.meta(self);
at::acosh_outf(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_acosh_default_backend_inplace final : public at::meta::structured_acosh {
  structured_acosh_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_acosh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_acosh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_acosh_(at::Tensor & self) {
structured_acosh_default_backend_inplace op(self);
op.meta(self);
at::acosh_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_asinh_default_backend_functional final : public at::meta::structured_asinh {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_asinh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_asinh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_asinh(const at::Tensor & self) {
structured_asinh_default_backend_functional op;
op.meta(self);
at::asinh_outf(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_asinh_default_backend_inplace final : public at::meta::structured_asinh {
  structured_asinh_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_asinh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_asinh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_asinh_(at::Tensor & self) {
structured_asinh_default_backend_inplace op(self);
op.meta(self);
at::asinh_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_atanh_default_backend_functional final : public at::meta::structured_atanh {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_atanh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_atanh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_atanh(const at::Tensor & self) {
structured_atanh_default_backend_functional op;
op.meta(self);
at::atanh_outf(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_atanh_default_backend_inplace final : public at::meta::structured_atanh {
  structured_atanh_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_atanh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_atanh::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_atanh_(at::Tensor & self) {
structured_atanh_default_backend_inplace op(self);
op.meta(self);
at::atanh_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
namespace {
const at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional__as_strided_(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::as_strided__symint(self, size, stride, storage_offset);
}
} // anonymous namespace
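// NOTE: not every op in this file goes through the structured-kernel structs.
// as_strided_ above is registered via a plain wrapper in an anonymous
// namespace that forwards directly to at::native::as_strided__symint, with no
// device check or device guard emitted for it.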
struct structured_asin_default_backend_functional final : public at::meta::structured_asin {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_asin::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_asin::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_asin(const at::Tensor & self) {
structured_asin_default_backend_functional op;
op.meta(self);
at::asin_outf(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_asin_default_backend_inplace final : public at::meta::structured_asin {
  structured_asin_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_asin::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_asin::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_asin_(at::Tensor & self) {
structured_asin_default_backend_inplace op(self);
op.meta(self);
at::asin_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_atan_default_backend_functional final : public at::meta::structured_atan {
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_atan::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_atan::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_atan(const at::Tensor & self) {
structured_atan_default_backend_functional op;
op.meta(self);
at::atan_outf(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_atan_default_backend_inplace final : public at::meta::structured_atan {
  structured_atan_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_atan::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_atan::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_atan_(at::Tensor & self) {
structured_atan_default_backend_inplace op(self);
op.meta(self);
at::atan_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
2179struct structured_baddbmm_default_backend_functional final : public at::meta::structured_baddbmm {
2180 void set_output_strided(
2181 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2182 TensorOptions options, DimnameList names
2183 ) override {
2184 auto current_device = guard_.current_device();
2185 if (C10_UNLIKELY(current_device.has_value())) {
2186 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2187 "structured kernels don't support multi-device outputs");
2188 } else {
2189 guard_.reset_device(options.device());
2190 }
2191 outputs_[output_idx] = create_out(sizes, strides, options);
2192 if (!names.empty()) {
2193 namedinference::propagate_names(*outputs_[output_idx], names);
2194 }
2195 // super must happen after, so that downstream can use maybe_get_output
2196 // to retrieve the output
2197 }
2198 void set_output_raw_strided(
2199 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2200 TensorOptions options, DimnameList names
2201 ) override {
2202 auto current_device = guard_.current_device();
2203 if (C10_UNLIKELY(current_device.has_value())) {
2204 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2205 "structured kernels don't support multi-device outputs");
2206 } else {
2207 guard_.reset_device(options.device());
2208 }
2209 outputs_[output_idx] = create_out(sizes, strides, options);
2210 if (!names.empty()) {
2211 namedinference::propagate_names(*outputs_[output_idx], names);
2212 }
2213 // super must happen after, so that downstream can use maybe_get_output
2214 // to retrieve the output
2215 }
2216 const Tensor& maybe_get_output(int64_t output_idx) override {
2217 return *outputs_[output_idx];
2218 }
2219 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2220 c10::OptionalDeviceGuard guard_;
2221};
2222at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
2223structured_baddbmm_default_backend_functional op;
2224op.meta(self, batch1, batch2, beta, alpha);
2225at::baddbmm_outf(self, batch1, batch2, beta, alpha, *op.outputs_[0]);
2226return std::move(op.outputs_[0]).take();
2227}
2228struct structured_baddbmm_default_backend_inplace final : public at::meta::structured_baddbmm {
2229 structured_baddbmm_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2230 void set_output_strided(
2231 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2232 TensorOptions options, DimnameList names
2233 ) override {
2234 auto current_device = guard_.current_device();
2235 if (C10_UNLIKELY(current_device.has_value())) {
2236 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2237 "structured kernels don't support multi-device outputs");
2238 } else {
2239 guard_.reset_device(options.device());
2240 }
2241 const auto& out = outputs_[output_idx].get();
2242 check_inplace(out, sizes, options);
2243 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2244 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2245 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2246 }
2247 if (!names.empty()) {
2248 namedinference::propagate_names(outputs_[output_idx], names);
2249 }
2250 // super must happen after, so that downstream can use maybe_get_output
2251 // to retrieve the output
2252 }
2253 void set_output_raw_strided(
2254 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2255 TensorOptions options, DimnameList names
2256 ) override {
2257 auto current_device = guard_.current_device();
2258 if (C10_UNLIKELY(current_device.has_value())) {
2259 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2260 "structured kernels don't support multi-device outputs");
2261 } else {
2262 guard_.reset_device(options.device());
2263 }
2264 const auto& out = outputs_[output_idx].get();
2265 check_inplace(out, sizes, options);
2266 if (!names.empty()) {
2267 namedinference::propagate_names(outputs_[output_idx], names);
2268 }
2269 // super must happen after, so that downstream can use maybe_get_output
2270 // to retrieve the output
2271 }
2272 const Tensor& maybe_get_output(int64_t output_idx) override {
2273 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2274 }
2275 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2276 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2277 c10::OptionalDeviceGuard guard_;
2278};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  structured_baddbmm_default_backend_inplace op(self);
  op.meta(self, batch1, batch2, beta, alpha);
  at::baddbmm_outf(self, batch1, batch2, beta, alpha, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
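// Note: the "*_inplace" wrappers reuse `self` as the output. check_inplace() validates that
// `self` is compatible with the computed sizes and options, and maybe_create_proxy() may hand
// back a temporary proxy tensor when `self` cannot be written as-is with the requested
// sizes/strides; in that case the out= kernel writes into the proxy and the result is copied
// back into `self` (the proxy_outputs_[0] check above).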
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_p_bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::bernoulli(self, p, generator);
}
} // anonymous namespace
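// Note: operators emitted inside anonymous namespaces, such as the bernoulli overload above,
// are not structured kernels for this dispatch key; they forward directly to the at::native
// implementation, with no device check and no device guard, as the generated comments state.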
2293struct structured_bitwise_not_default_backend_functional final : public at::meta::structured_bitwise_not {
2294 void set_output_strided(
2295 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2296 TensorOptions options, DimnameList names
2297 ) override {
2298 auto current_device = guard_.current_device();
2299 if (C10_UNLIKELY(current_device.has_value())) {
2300 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2301 "structured kernels don't support multi-device outputs");
2302 } else {
2303 guard_.reset_device(options.device());
2304 }
2305 outputs_[output_idx] = create_out(sizes, strides, options);
2306 if (!names.empty()) {
2307 namedinference::propagate_names(*outputs_[output_idx], names);
2308 }
2309 // super must happen after, so that downstream can use maybe_get_output
2310 // to retrieve the output
2311 at::meta::structured_bitwise_not::set_output_raw_strided(output_idx, sizes, strides, options, names);
2312 }
2313 void set_output_raw_strided(
2314 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2315 TensorOptions options, DimnameList names
2316 ) override {
2317 auto current_device = guard_.current_device();
2318 if (C10_UNLIKELY(current_device.has_value())) {
2319 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2320 "structured kernels don't support multi-device outputs");
2321 } else {
2322 guard_.reset_device(options.device());
2323 }
2324 outputs_[output_idx] = create_out(sizes, strides, options);
2325 if (!names.empty()) {
2326 namedinference::propagate_names(*outputs_[output_idx], names);
2327 }
2328 // super must happen after, so that downstream can use maybe_get_output
2329 // to retrieve the output
2330 at::meta::structured_bitwise_not::set_output_raw_strided(output_idx, sizes, strides, options, names);
2331 }
2332 const Tensor& maybe_get_output(int64_t output_idx) override {
2333 return *outputs_[output_idx];
2334 }
2335 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2336 c10::OptionalDeviceGuard guard_;
2337};
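// Note: unlike the baddbmm structs above, the bitwise_not structs finish each set_output_*
// method with a call to at::meta::structured_bitwise_not::set_output_raw_strided(...). This
// "super" call shows up for ops whose meta class needs to observe the chosen output (for
// example, TensorIterator-based elementwise ops) and is omitted otherwise; this is an
// interpretation of the generated pattern, not something this file states explicitly.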
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_bitwise_not(const at::Tensor & self) {
  structured_bitwise_not_default_backend_functional op;
  op.meta(self);
  at::bitwise_not_outf(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
2344struct structured_bitwise_not_default_backend_inplace final : public at::meta::structured_bitwise_not {
2345 structured_bitwise_not_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2346 void set_output_strided(
2347 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2348 TensorOptions options, DimnameList names
2349 ) override {
2350 auto current_device = guard_.current_device();
2351 if (C10_UNLIKELY(current_device.has_value())) {
2352 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2353 "structured kernels don't support multi-device outputs");
2354 } else {
2355 guard_.reset_device(options.device());
2356 }
2357 const auto& out = outputs_[output_idx].get();
2358 check_inplace(out, sizes, options);
2359 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2360 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2361 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2362 }
2363 if (!names.empty()) {
2364 namedinference::propagate_names(outputs_[output_idx], names);
2365 }
2366 // super must happen after, so that downstream can use maybe_get_output
2367 // to retrieve the output
2368 at::meta::structured_bitwise_not::set_output_raw_strided(output_idx, sizes, strides, options, names);
2369 }
2370 void set_output_raw_strided(
2371 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2372 TensorOptions options, DimnameList names
2373 ) override {
2374 auto current_device = guard_.current_device();
2375 if (C10_UNLIKELY(current_device.has_value())) {
2376 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2377 "structured kernels don't support multi-device outputs");
2378 } else {
2379 guard_.reset_device(options.device());
2380 }
2381 const auto& out = outputs_[output_idx].get();
2382 check_inplace(out, sizes, options);
2383 if (!names.empty()) {
2384 namedinference::propagate_names(outputs_[output_idx], names);
2385 }
2386 // super must happen after, so that downstream can use maybe_get_output
2387 // to retrieve the output
2388 at::meta::structured_bitwise_not::set_output_raw_strided(output_idx, sizes, strides, options, names);
2389 }
2390 const Tensor& maybe_get_output(int64_t output_idx) override {
2391 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2392 }
2393 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2394 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2395 c10::OptionalDeviceGuard guard_;
2396};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_bitwise_not_(at::Tensor & self) {
  structured_bitwise_not_default_backend_inplace op(self);
  op.meta(self);
  at::bitwise_not_outf(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
2404struct structured_copysign_Tensor_default_backend_functional final : public at::meta::structured_copysign_Tensor {
2405 void set_output_strided(
2406 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2407 TensorOptions options, DimnameList names
2408 ) override {
2409 auto current_device = guard_.current_device();
2410 if (C10_UNLIKELY(current_device.has_value())) {
2411 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2412 "structured kernels don't support multi-device outputs");
2413 } else {
2414 guard_.reset_device(options.device());
2415 }
2416 outputs_[output_idx] = create_out(sizes, strides, options);
2417 if (!names.empty()) {
2418 namedinference::propagate_names(*outputs_[output_idx], names);
2419 }
2420 // super must happen after, so that downstream can use maybe_get_output
2421 // to retrieve the output
2422 at::meta::structured_copysign_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
2423 }
2424 void set_output_raw_strided(
2425 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2426 TensorOptions options, DimnameList names
2427 ) override {
2428 auto current_device = guard_.current_device();
2429 if (C10_UNLIKELY(current_device.has_value())) {
2430 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2431 "structured kernels don't support multi-device outputs");
2432 } else {
2433 guard_.reset_device(options.device());
2434 }
2435 outputs_[output_idx] = create_out(sizes, strides, options);
2436 if (!names.empty()) {
2437 namedinference::propagate_names(*outputs_[output_idx], names);
2438 }
2439 // super must happen after, so that downstream can use maybe_get_output
2440 // to retrieve the output
2441 at::meta::structured_copysign_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
2442 }
2443 const Tensor& maybe_get_output(int64_t output_idx) override {
2444 return *outputs_[output_idx];
2445 }
2446 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2447 c10::OptionalDeviceGuard guard_;
2448};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_copysign_Tensor(const at::Tensor & self, const at::Tensor & other) {
  structured_copysign_Tensor_default_backend_functional op;
  op.meta(self, other);
  at::copysign_outf(self, other, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
2455struct structured_copysign_Tensor_default_backend_inplace final : public at::meta::structured_copysign_Tensor {
2456 structured_copysign_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2457 void set_output_strided(
2458 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2459 TensorOptions options, DimnameList names
2460 ) override {
2461 auto current_device = guard_.current_device();
2462 if (C10_UNLIKELY(current_device.has_value())) {
2463 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2464 "structured kernels don't support multi-device outputs");
2465 } else {
2466 guard_.reset_device(options.device());
2467 }
2468 const auto& out = outputs_[output_idx].get();
2469 check_inplace(out, sizes, options);
2470 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2471 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2472 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2473 }
2474 if (!names.empty()) {
2475 namedinference::propagate_names(outputs_[output_idx], names);
2476 }
2477 // super must happen after, so that downstream can use maybe_get_output
2478 // to retrieve the output
2479 at::meta::structured_copysign_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
2480 }
2481 void set_output_raw_strided(
2482 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2483 TensorOptions options, DimnameList names
2484 ) override {
2485 auto current_device = guard_.current_device();
2486 if (C10_UNLIKELY(current_device.has_value())) {
2487 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2488 "structured kernels don't support multi-device outputs");
2489 } else {
2490 guard_.reset_device(options.device());
2491 }
2492 const auto& out = outputs_[output_idx].get();
2493 check_inplace(out, sizes, options);
2494 if (!names.empty()) {
2495 namedinference::propagate_names(outputs_[output_idx], names);
2496 }
2497 // super must happen after, so that downstream can use maybe_get_output
2498 // to retrieve the output
2499 at::meta::structured_copysign_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
2500 }
2501 const Tensor& maybe_get_output(int64_t output_idx) override {
2502 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2503 }
2504 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2505 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2506 c10::OptionalDeviceGuard guard_;
2507};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_copysign__Tensor(at::Tensor & self, const at::Tensor & other) {
  structured_copysign_Tensor_default_backend_inplace op(self);
  op.meta(self, other);
  at::copysign_outf(self, other, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
2515struct structured_bmm_default_backend_functional final : public at::meta::structured_bmm {
2516 void set_output_strided(
2517 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2518 TensorOptions options, DimnameList names
2519 ) override {
2520 auto current_device = guard_.current_device();
2521 if (C10_UNLIKELY(current_device.has_value())) {
2522 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2523 "structured kernels don't support multi-device outputs");
2524 } else {
2525 guard_.reset_device(options.device());
2526 }
2527 outputs_[output_idx] = create_out(sizes, strides, options);
2528 if (!names.empty()) {
2529 namedinference::propagate_names(*outputs_[output_idx], names);
2530 }
2531 // super must happen after, so that downstream can use maybe_get_output
2532 // to retrieve the output
2533 }
2534 void set_output_raw_strided(
2535 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2536 TensorOptions options, DimnameList names
2537 ) override {
2538 auto current_device = guard_.current_device();
2539 if (C10_UNLIKELY(current_device.has_value())) {
2540 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2541 "structured kernels don't support multi-device outputs");
2542 } else {
2543 guard_.reset_device(options.device());
2544 }
2545 outputs_[output_idx] = create_out(sizes, strides, options);
2546 if (!names.empty()) {
2547 namedinference::propagate_names(*outputs_[output_idx], names);
2548 }
2549 // super must happen after, so that downstream can use maybe_get_output
2550 // to retrieve the output
2551 }
2552 const Tensor& maybe_get_output(int64_t output_idx) override {
2553 return *outputs_[output_idx];
2554 }
2555 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2556 c10::OptionalDeviceGuard guard_;
2557};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_bmm(const at::Tensor & self, const at::Tensor & mat2) {
  structured_bmm_default_backend_functional op;
  op.meta(self, mat2);
  at::bmm_outf(self, mat2, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
2564struct structured_cat_default_backend_functional final : public at::meta::structured_cat {
2565 void set_output_strided(
2566 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2567 TensorOptions options, DimnameList names
2568 ) override {
2569 auto current_device = guard_.current_device();
2570 if (C10_UNLIKELY(current_device.has_value())) {
2571 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2572 "structured kernels don't support multi-device outputs");
2573 } else {
2574 guard_.reset_device(options.device());
2575 }
2576 outputs_[output_idx] = create_out(sizes, strides, options);
2577 if (!names.empty()) {
2578 namedinference::propagate_names(*outputs_[output_idx], names);
2579 }
2580 // super must happen after, so that downstream can use maybe_get_output
2581 // to retrieve the output
2582 }
2583 void set_output_raw_strided(
2584 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2585 TensorOptions options, DimnameList names
2586 ) override {
2587 auto current_device = guard_.current_device();
2588 if (C10_UNLIKELY(current_device.has_value())) {
2589 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2590 "structured kernels don't support multi-device outputs");
2591 } else {
2592 guard_.reset_device(options.device());
2593 }
2594 outputs_[output_idx] = create_out(sizes, strides, options);
2595 if (!names.empty()) {
2596 namedinference::propagate_names(*outputs_[output_idx], names);
2597 }
2598 // super must happen after, so that downstream can use maybe_get_output
2599 // to retrieve the output
2600 }
2601 const Tensor& maybe_get_output(int64_t output_idx) override {
2602 return *outputs_[output_idx];
2603 }
2604 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2605 c10::OptionalDeviceGuard guard_;
2606};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_cat(const at::ITensorListRef & tensors, int64_t dim) {
  structured_cat_default_backend_functional op;
  auto precompute = op.meta(tensors, dim);
  (void)precompute;
  at::cat_outf(tensors, precompute.dim, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
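// Note: cat is a structured op with precomputed meta outputs. op.meta(tensors, dim) returns a
// precompute struct, and the precompute.dim computed by the meta function (rather than the raw
// `dim` argument) is what gets forwarded to at::cat_outf; the (void)precompute cast merely
// silences unused-variable warnings for ops that do not consume any precomputed field.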
2614struct structured_ceil_default_backend_functional final : public at::meta::structured_ceil {
2615 void set_output_strided(
2616 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2617 TensorOptions options, DimnameList names
2618 ) override {
2619 auto current_device = guard_.current_device();
2620 if (C10_UNLIKELY(current_device.has_value())) {
2621 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2622 "structured kernels don't support multi-device outputs");
2623 } else {
2624 guard_.reset_device(options.device());
2625 }
2626 outputs_[output_idx] = create_out(sizes, strides, options);
2627 if (!names.empty()) {
2628 namedinference::propagate_names(*outputs_[output_idx], names);
2629 }
2630 // super must happen after, so that downstream can use maybe_get_output
2631 // to retrieve the output
2632 at::meta::structured_ceil::set_output_raw_strided(output_idx, sizes, strides, options, names);
2633 }
2634 void set_output_raw_strided(
2635 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2636 TensorOptions options, DimnameList names
2637 ) override {
2638 auto current_device = guard_.current_device();
2639 if (C10_UNLIKELY(current_device.has_value())) {
2640 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2641 "structured kernels don't support multi-device outputs");
2642 } else {
2643 guard_.reset_device(options.device());
2644 }
2645 outputs_[output_idx] = create_out(sizes, strides, options);
2646 if (!names.empty()) {
2647 namedinference::propagate_names(*outputs_[output_idx], names);
2648 }
2649 // super must happen after, so that downstream can use maybe_get_output
2650 // to retrieve the output
2651 at::meta::structured_ceil::set_output_raw_strided(output_idx, sizes, strides, options, names);
2652 }
2653 const Tensor& maybe_get_output(int64_t output_idx) override {
2654 return *outputs_[output_idx];
2655 }
2656 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2657 c10::OptionalDeviceGuard guard_;
2658};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_ceil(const at::Tensor & self) {
  structured_ceil_default_backend_functional op;
  op.meta(self);
  at::ceil_outf(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
2665struct structured_ceil_default_backend_inplace final : public at::meta::structured_ceil {
2666 structured_ceil_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2667 void set_output_strided(
2668 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2669 TensorOptions options, DimnameList names
2670 ) override {
2671 auto current_device = guard_.current_device();
2672 if (C10_UNLIKELY(current_device.has_value())) {
2673 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2674 "structured kernels don't support multi-device outputs");
2675 } else {
2676 guard_.reset_device(options.device());
2677 }
2678 const auto& out = outputs_[output_idx].get();
2679 check_inplace(out, sizes, options);
2680 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2681 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2682 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2683 }
2684 if (!names.empty()) {
2685 namedinference::propagate_names(outputs_[output_idx], names);
2686 }
2687 // super must happen after, so that downstream can use maybe_get_output
2688 // to retrieve the output
2689 at::meta::structured_ceil::set_output_raw_strided(output_idx, sizes, strides, options, names);
2690 }
2691 void set_output_raw_strided(
2692 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2693 TensorOptions options, DimnameList names
2694 ) override {
2695 auto current_device = guard_.current_device();
2696 if (C10_UNLIKELY(current_device.has_value())) {
2697 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2698 "structured kernels don't support multi-device outputs");
2699 } else {
2700 guard_.reset_device(options.device());
2701 }
2702 const auto& out = outputs_[output_idx].get();
2703 check_inplace(out, sizes, options);
2704 if (!names.empty()) {
2705 namedinference::propagate_names(outputs_[output_idx], names);
2706 }
2707 // super must happen after, so that downstream can use maybe_get_output
2708 // to retrieve the output
2709 at::meta::structured_ceil::set_output_raw_strided(output_idx, sizes, strides, options, names);
2710 }
2711 const Tensor& maybe_get_output(int64_t output_idx) override {
2712 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2713 }
2714 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2715 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2716 c10::OptionalDeviceGuard guard_;
2717};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_ceil_(at::Tensor & self) {
  structured_ceil_default_backend_inplace op(self);
  op.meta(self);
  at::ceil_outf(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
2725struct structured_clamp_default_backend_functional final : public at::meta::structured_clamp {
2726 void set_output_strided(
2727 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2728 TensorOptions options, DimnameList names
2729 ) override {
2730 auto current_device = guard_.current_device();
2731 if (C10_UNLIKELY(current_device.has_value())) {
2732 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2733 "structured kernels don't support multi-device outputs");
2734 } else {
2735 guard_.reset_device(options.device());
2736 }
2737 outputs_[output_idx] = create_out(sizes, strides, options);
2738 if (!names.empty()) {
2739 namedinference::propagate_names(*outputs_[output_idx], names);
2740 }
2741 // super must happen after, so that downstream can use maybe_get_output
2742 // to retrieve the output
2743 at::meta::structured_clamp::set_output_raw_strided(output_idx, sizes, strides, options, names);
2744 }
2745 void set_output_raw_strided(
2746 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2747 TensorOptions options, DimnameList names
2748 ) override {
2749 auto current_device = guard_.current_device();
2750 if (C10_UNLIKELY(current_device.has_value())) {
2751 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2752 "structured kernels don't support multi-device outputs");
2753 } else {
2754 guard_.reset_device(options.device());
2755 }
2756 outputs_[output_idx] = create_out(sizes, strides, options);
2757 if (!names.empty()) {
2758 namedinference::propagate_names(*outputs_[output_idx], names);
2759 }
2760 // super must happen after, so that downstream can use maybe_get_output
2761 // to retrieve the output
2762 at::meta::structured_clamp::set_output_raw_strided(output_idx, sizes, strides, options, names);
2763 }
2764 const Tensor& maybe_get_output(int64_t output_idx) override {
2765 return *outputs_[output_idx];
2766 }
2767 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2768 c10::OptionalDeviceGuard guard_;
2769};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  structured_clamp_default_backend_functional op;
  op.meta(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()));
  at::clamp_outf(self, min, max, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
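// Note: for clamp, the optional Scalar bounds are converted to at::OptionalScalarRef only for
// the op.meta(...) call, which takes references, while the original c10::optional<at::Scalar>
// values are passed through unchanged to at::clamp_outf. The clamp.Tensor variants below do the
// same with at::OptionalTensorRef, additionally skipping undefined tensors.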
2776struct structured_clamp_default_backend_inplace final : public at::meta::structured_clamp {
2777 structured_clamp_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2778 void set_output_strided(
2779 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2780 TensorOptions options, DimnameList names
2781 ) override {
2782 auto current_device = guard_.current_device();
2783 if (C10_UNLIKELY(current_device.has_value())) {
2784 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2785 "structured kernels don't support multi-device outputs");
2786 } else {
2787 guard_.reset_device(options.device());
2788 }
2789 const auto& out = outputs_[output_idx].get();
2790 check_inplace(out, sizes, options);
2791 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2792 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2793 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2794 }
2795 if (!names.empty()) {
2796 namedinference::propagate_names(outputs_[output_idx], names);
2797 }
2798 // super must happen after, so that downstream can use maybe_get_output
2799 // to retrieve the output
2800 at::meta::structured_clamp::set_output_raw_strided(output_idx, sizes, strides, options, names);
2801 }
2802 void set_output_raw_strided(
2803 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2804 TensorOptions options, DimnameList names
2805 ) override {
2806 auto current_device = guard_.current_device();
2807 if (C10_UNLIKELY(current_device.has_value())) {
2808 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2809 "structured kernels don't support multi-device outputs");
2810 } else {
2811 guard_.reset_device(options.device());
2812 }
2813 const auto& out = outputs_[output_idx].get();
2814 check_inplace(out, sizes, options);
2815 if (!names.empty()) {
2816 namedinference::propagate_names(outputs_[output_idx], names);
2817 }
2818 // super must happen after, so that downstream can use maybe_get_output
2819 // to retrieve the output
2820 at::meta::structured_clamp::set_output_raw_strided(output_idx, sizes, strides, options, names);
2821 }
2822 const Tensor& maybe_get_output(int64_t output_idx) override {
2823 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2824 }
2825 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2826 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2827 c10::OptionalDeviceGuard guard_;
2828};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  structured_clamp_default_backend_inplace op(self);
  op.meta(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()));
  at::clamp_outf(self, min, max, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
2836struct structured_clamp_Tensor_default_backend_functional final : public at::meta::structured_clamp_Tensor {
2837 void set_output_strided(
2838 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2839 TensorOptions options, DimnameList names
2840 ) override {
2841 auto current_device = guard_.current_device();
2842 if (C10_UNLIKELY(current_device.has_value())) {
2843 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2844 "structured kernels don't support multi-device outputs");
2845 } else {
2846 guard_.reset_device(options.device());
2847 }
2848 outputs_[output_idx] = create_out(sizes, strides, options);
2849 if (!names.empty()) {
2850 namedinference::propagate_names(*outputs_[output_idx], names);
2851 }
2852 // super must happen after, so that downstream can use maybe_get_output
2853 // to retrieve the output
2854 at::meta::structured_clamp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
2855 }
2856 void set_output_raw_strided(
2857 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2858 TensorOptions options, DimnameList names
2859 ) override {
2860 auto current_device = guard_.current_device();
2861 if (C10_UNLIKELY(current_device.has_value())) {
2862 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2863 "structured kernels don't support multi-device outputs");
2864 } else {
2865 guard_.reset_device(options.device());
2866 }
2867 outputs_[output_idx] = create_out(sizes, strides, options);
2868 if (!names.empty()) {
2869 namedinference::propagate_names(*outputs_[output_idx], names);
2870 }
2871 // super must happen after, so that downstream can use maybe_get_output
2872 // to retrieve the output
2873 at::meta::structured_clamp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
2874 }
2875 const Tensor& maybe_get_output(int64_t output_idx) override {
2876 return *outputs_[output_idx];
2877 }
2878 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2879 c10::OptionalDeviceGuard guard_;
2880};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_clamp_Tensor(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  structured_clamp_Tensor_default_backend_functional op;
  op.meta(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()));
  at::clamp_outf(self, min, max, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
2887struct structured_clamp_Tensor_default_backend_inplace final : public at::meta::structured_clamp_Tensor {
2888 structured_clamp_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2889 void set_output_strided(
2890 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2891 TensorOptions options, DimnameList names
2892 ) override {
2893 auto current_device = guard_.current_device();
2894 if (C10_UNLIKELY(current_device.has_value())) {
2895 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2896 "structured kernels don't support multi-device outputs");
2897 } else {
2898 guard_.reset_device(options.device());
2899 }
2900 const auto& out = outputs_[output_idx].get();
2901 check_inplace(out, sizes, options);
2902 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2903 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2904 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2905 }
2906 if (!names.empty()) {
2907 namedinference::propagate_names(outputs_[output_idx], names);
2908 }
2909 // super must happen after, so that downstream can use maybe_get_output
2910 // to retrieve the output
2911 at::meta::structured_clamp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
2912 }
2913 void set_output_raw_strided(
2914 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2915 TensorOptions options, DimnameList names
2916 ) override {
2917 auto current_device = guard_.current_device();
2918 if (C10_UNLIKELY(current_device.has_value())) {
2919 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2920 "structured kernels don't support multi-device outputs");
2921 } else {
2922 guard_.reset_device(options.device());
2923 }
2924 const auto& out = outputs_[output_idx].get();
2925 check_inplace(out, sizes, options);
2926 if (!names.empty()) {
2927 namedinference::propagate_names(outputs_[output_idx], names);
2928 }
2929 // super must happen after, so that downstream can use maybe_get_output
2930 // to retrieve the output
2931 at::meta::structured_clamp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
2932 }
2933 const Tensor& maybe_get_output(int64_t output_idx) override {
2934 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2935 }
2936 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2937 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2938 c10::OptionalDeviceGuard guard_;
2939};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_clamp__Tensor(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  structured_clamp_Tensor_default_backend_inplace op(self);
  op.meta(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()));
  at::clamp_outf(self, min, max, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
2947struct structured_clamp_max_default_backend_functional final : public at::meta::structured_clamp_max {
2948 void set_output_strided(
2949 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2950 TensorOptions options, DimnameList names
2951 ) override {
2952 auto current_device = guard_.current_device();
2953 if (C10_UNLIKELY(current_device.has_value())) {
2954 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2955 "structured kernels don't support multi-device outputs");
2956 } else {
2957 guard_.reset_device(options.device());
2958 }
2959 outputs_[output_idx] = create_out(sizes, strides, options);
2960 if (!names.empty()) {
2961 namedinference::propagate_names(*outputs_[output_idx], names);
2962 }
2963 // super must happen after, so that downstream can use maybe_get_output
2964 // to retrieve the output
2965 at::meta::structured_clamp_max::set_output_raw_strided(output_idx, sizes, strides, options, names);
2966 }
2967 void set_output_raw_strided(
2968 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2969 TensorOptions options, DimnameList names
2970 ) override {
2971 auto current_device = guard_.current_device();
2972 if (C10_UNLIKELY(current_device.has_value())) {
2973 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
2974 "structured kernels don't support multi-device outputs");
2975 } else {
2976 guard_.reset_device(options.device());
2977 }
2978 outputs_[output_idx] = create_out(sizes, strides, options);
2979 if (!names.empty()) {
2980 namedinference::propagate_names(*outputs_[output_idx], names);
2981 }
2982 // super must happen after, so that downstream can use maybe_get_output
2983 // to retrieve the output
2984 at::meta::structured_clamp_max::set_output_raw_strided(output_idx, sizes, strides, options, names);
2985 }
2986 const Tensor& maybe_get_output(int64_t output_idx) override {
2987 return *outputs_[output_idx];
2988 }
2989 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2990 c10::OptionalDeviceGuard guard_;
2991};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_clamp_max(const at::Tensor & self, const at::Scalar & max) {
  structured_clamp_max_default_backend_functional op;
  op.meta(self, max);
  at::clamp_max_outf(self, max, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
2998struct structured_clamp_max_default_backend_inplace final : public at::meta::structured_clamp_max {
2999 structured_clamp_max_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3000 void set_output_strided(
3001 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3002 TensorOptions options, DimnameList names
3003 ) override {
3004 auto current_device = guard_.current_device();
3005 if (C10_UNLIKELY(current_device.has_value())) {
3006 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3007 "structured kernels don't support multi-device outputs");
3008 } else {
3009 guard_.reset_device(options.device());
3010 }
3011 const auto& out = outputs_[output_idx].get();
3012 check_inplace(out, sizes, options);
3013 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3014 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3015 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3016 }
3017 if (!names.empty()) {
3018 namedinference::propagate_names(outputs_[output_idx], names);
3019 }
3020 // super must happen after, so that downstream can use maybe_get_output
3021 // to retrieve the output
3022 at::meta::structured_clamp_max::set_output_raw_strided(output_idx, sizes, strides, options, names);
3023 }
3024 void set_output_raw_strided(
3025 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3026 TensorOptions options, DimnameList names
3027 ) override {
3028 auto current_device = guard_.current_device();
3029 if (C10_UNLIKELY(current_device.has_value())) {
3030 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3031 "structured kernels don't support multi-device outputs");
3032 } else {
3033 guard_.reset_device(options.device());
3034 }
3035 const auto& out = outputs_[output_idx].get();
3036 check_inplace(out, sizes, options);
3037 if (!names.empty()) {
3038 namedinference::propagate_names(outputs_[output_idx], names);
3039 }
3040 // super must happen after, so that downstream can use maybe_get_output
3041 // to retrieve the output
3042 at::meta::structured_clamp_max::set_output_raw_strided(output_idx, sizes, strides, options, names);
3043 }
3044 const Tensor& maybe_get_output(int64_t output_idx) override {
3045 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3046 }
3047 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3048 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3049 c10::OptionalDeviceGuard guard_;
3050};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_clamp_max_(at::Tensor & self, const at::Scalar & max) {
  structured_clamp_max_default_backend_inplace op(self);
  op.meta(self, max);
  at::clamp_max_outf(self, max, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
3058struct structured_clamp_max_Tensor_default_backend_functional final : public at::meta::structured_clamp_max_Tensor {
3059 void set_output_strided(
3060 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3061 TensorOptions options, DimnameList names
3062 ) override {
3063 auto current_device = guard_.current_device();
3064 if (C10_UNLIKELY(current_device.has_value())) {
3065 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3066 "structured kernels don't support multi-device outputs");
3067 } else {
3068 guard_.reset_device(options.device());
3069 }
3070 outputs_[output_idx] = create_out(sizes, strides, options);
3071 if (!names.empty()) {
3072 namedinference::propagate_names(*outputs_[output_idx], names);
3073 }
3074 // super must happen after, so that downstream can use maybe_get_output
3075 // to retrieve the output
3076 at::meta::structured_clamp_max_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3077 }
3078 void set_output_raw_strided(
3079 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3080 TensorOptions options, DimnameList names
3081 ) override {
3082 auto current_device = guard_.current_device();
3083 if (C10_UNLIKELY(current_device.has_value())) {
3084 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3085 "structured kernels don't support multi-device outputs");
3086 } else {
3087 guard_.reset_device(options.device());
3088 }
3089 outputs_[output_idx] = create_out(sizes, strides, options);
3090 if (!names.empty()) {
3091 namedinference::propagate_names(*outputs_[output_idx], names);
3092 }
3093 // super must happen after, so that downstream can use maybe_get_output
3094 // to retrieve the output
3095 at::meta::structured_clamp_max_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3096 }
3097 const Tensor& maybe_get_output(int64_t output_idx) override {
3098 return *outputs_[output_idx];
3099 }
3100 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3101 c10::OptionalDeviceGuard guard_;
3102};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_clamp_max_Tensor(const at::Tensor & self, const at::Tensor & max) {
  structured_clamp_max_Tensor_default_backend_functional op;
  op.meta(self, max);
  at::clamp_max_outf(self, max, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
3109struct structured_clamp_max_Tensor_default_backend_inplace final : public at::meta::structured_clamp_max_Tensor {
3110 structured_clamp_max_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3111 void set_output_strided(
3112 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3113 TensorOptions options, DimnameList names
3114 ) override {
3115 auto current_device = guard_.current_device();
3116 if (C10_UNLIKELY(current_device.has_value())) {
3117 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3118 "structured kernels don't support multi-device outputs");
3119 } else {
3120 guard_.reset_device(options.device());
3121 }
3122 const auto& out = outputs_[output_idx].get();
3123 check_inplace(out, sizes, options);
3124 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3125 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3126 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3127 }
3128 if (!names.empty()) {
3129 namedinference::propagate_names(outputs_[output_idx], names);
3130 }
3131 // super must happen after, so that downstream can use maybe_get_output
3132 // to retrieve the output
3133 at::meta::structured_clamp_max_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3134 }
3135 void set_output_raw_strided(
3136 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3137 TensorOptions options, DimnameList names
3138 ) override {
3139 auto current_device = guard_.current_device();
3140 if (C10_UNLIKELY(current_device.has_value())) {
3141 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3142 "structured kernels don't support multi-device outputs");
3143 } else {
3144 guard_.reset_device(options.device());
3145 }
3146 const auto& out = outputs_[output_idx].get();
3147 check_inplace(out, sizes, options);
3148 if (!names.empty()) {
3149 namedinference::propagate_names(outputs_[output_idx], names);
3150 }
3151 // super must happen after, so that downstream can use maybe_get_output
3152 // to retrieve the output
3153 at::meta::structured_clamp_max_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3154 }
3155 const Tensor& maybe_get_output(int64_t output_idx) override {
3156 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3157 }
3158 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3159 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3160 c10::OptionalDeviceGuard guard_;
3161};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_clamp_max__Tensor(at::Tensor & self, const at::Tensor & max) {
  structured_clamp_max_Tensor_default_backend_inplace op(self);
  op.meta(self, max);
  at::clamp_max_outf(self, max, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
3169struct structured_clamp_min_default_backend_functional final : public at::meta::structured_clamp_min {
3170 void set_output_strided(
3171 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3172 TensorOptions options, DimnameList names
3173 ) override {
3174 auto current_device = guard_.current_device();
3175 if (C10_UNLIKELY(current_device.has_value())) {
3176 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3177 "structured kernels don't support multi-device outputs");
3178 } else {
3179 guard_.reset_device(options.device());
3180 }
3181 outputs_[output_idx] = create_out(sizes, strides, options);
3182 if (!names.empty()) {
3183 namedinference::propagate_names(*outputs_[output_idx], names);
3184 }
3185 // super must happen after, so that downstream can use maybe_get_output
3186 // to retrieve the output
3187 at::meta::structured_clamp_min::set_output_raw_strided(output_idx, sizes, strides, options, names);
3188 }
3189 void set_output_raw_strided(
3190 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3191 TensorOptions options, DimnameList names
3192 ) override {
3193 auto current_device = guard_.current_device();
3194 if (C10_UNLIKELY(current_device.has_value())) {
3195 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3196 "structured kernels don't support multi-device outputs");
3197 } else {
3198 guard_.reset_device(options.device());
3199 }
3200 outputs_[output_idx] = create_out(sizes, strides, options);
3201 if (!names.empty()) {
3202 namedinference::propagate_names(*outputs_[output_idx], names);
3203 }
3204 // super must happen after, so that downstream can use maybe_get_output
3205 // to retrieve the output
3206 at::meta::structured_clamp_min::set_output_raw_strided(output_idx, sizes, strides, options, names);
3207 }
3208 const Tensor& maybe_get_output(int64_t output_idx) override {
3209 return *outputs_[output_idx];
3210 }
3211 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3212 c10::OptionalDeviceGuard guard_;
3213};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_clamp_min(const at::Tensor & self, const at::Scalar & min) {
  structured_clamp_min_default_backend_functional op;
  op.meta(self, min);
  at::clamp_min_outf(self, min, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
3220struct structured_clamp_min_default_backend_inplace final : public at::meta::structured_clamp_min {
3221 structured_clamp_min_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3222 void set_output_strided(
3223 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3224 TensorOptions options, DimnameList names
3225 ) override {
3226 auto current_device = guard_.current_device();
3227 if (C10_UNLIKELY(current_device.has_value())) {
3228 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3229 "structured kernels don't support multi-device outputs");
3230 } else {
3231 guard_.reset_device(options.device());
3232 }
3233 const auto& out = outputs_[output_idx].get();
3234 check_inplace(out, sizes, options);
3235 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3236 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3237 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3238 }
3239 if (!names.empty()) {
3240 namedinference::propagate_names(outputs_[output_idx], names);
3241 }
3242 // super must happen after, so that downstream can use maybe_get_output
3243 // to retrieve the output
3244 at::meta::structured_clamp_min::set_output_raw_strided(output_idx, sizes, strides, options, names);
3245 }
3246 void set_output_raw_strided(
3247 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3248 TensorOptions options, DimnameList names
3249 ) override {
3250 auto current_device = guard_.current_device();
3251 if (C10_UNLIKELY(current_device.has_value())) {
3252 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3253 "structured kernels don't support multi-device outputs");
3254 } else {
3255 guard_.reset_device(options.device());
3256 }
3257 const auto& out = outputs_[output_idx].get();
3258 check_inplace(out, sizes, options);
3259 if (!names.empty()) {
3260 namedinference::propagate_names(outputs_[output_idx], names);
3261 }
3262 // super must happen after, so that downstream can use maybe_get_output
3263 // to retrieve the output
3264 at::meta::structured_clamp_min::set_output_raw_strided(output_idx, sizes, strides, options, names);
3265 }
3266 const Tensor& maybe_get_output(int64_t output_idx) override {
3267 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3268 }
3269 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3270 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3271 c10::OptionalDeviceGuard guard_;
3272};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_clamp_min_(at::Tensor & self, const at::Scalar & min) {
  structured_clamp_min_default_backend_inplace op(self);
  op.meta(self, min);
  at::clamp_min_outf(self, min, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
3280struct structured_clamp_min_Tensor_default_backend_functional final : public at::meta::structured_clamp_min_Tensor {
3281 void set_output_strided(
3282 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3283 TensorOptions options, DimnameList names
3284 ) override {
3285 auto current_device = guard_.current_device();
3286 if (C10_UNLIKELY(current_device.has_value())) {
3287 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3288 "structured kernels don't support multi-device outputs");
3289 } else {
3290 guard_.reset_device(options.device());
3291 }
3292 outputs_[output_idx] = create_out(sizes, strides, options);
3293 if (!names.empty()) {
3294 namedinference::propagate_names(*outputs_[output_idx], names);
3295 }
3296 // super must happen after, so that downstream can use maybe_get_output
3297 // to retrieve the output
3298 at::meta::structured_clamp_min_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3299 }
3300 void set_output_raw_strided(
3301 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3302 TensorOptions options, DimnameList names
3303 ) override {
3304 auto current_device = guard_.current_device();
3305 if (C10_UNLIKELY(current_device.has_value())) {
3306 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3307 "structured kernels don't support multi-device outputs");
3308 } else {
3309 guard_.reset_device(options.device());
3310 }
3311 outputs_[output_idx] = create_out(sizes, strides, options);
3312 if (!names.empty()) {
3313 namedinference::propagate_names(*outputs_[output_idx], names);
3314 }
3315 // super must happen after, so that downstream can use maybe_get_output
3316 // to retrieve the output
3317 at::meta::structured_clamp_min_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3318 }
3319 const Tensor& maybe_get_output(int64_t output_idx) override {
3320 return *outputs_[output_idx];
3321 }
3322 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3323 c10::OptionalDeviceGuard guard_;
3324};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_clamp_min_Tensor(const at::Tensor & self, const at::Tensor & min) {
  structured_clamp_min_Tensor_default_backend_functional op;
  op.meta(self, min);
  at::clamp_min_outf(self, min, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
3331struct structured_clamp_min_Tensor_default_backend_inplace final : public at::meta::structured_clamp_min_Tensor {
3332 structured_clamp_min_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3333 void set_output_strided(
3334 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3335 TensorOptions options, DimnameList names
3336 ) override {
3337 auto current_device = guard_.current_device();
3338 if (C10_UNLIKELY(current_device.has_value())) {
3339 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3340 "structured kernels don't support multi-device outputs");
3341 } else {
3342 guard_.reset_device(options.device());
3343 }
3344 const auto& out = outputs_[output_idx].get();
3345 check_inplace(out, sizes, options);
3346 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3347 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3348 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3349 }
3350 if (!names.empty()) {
3351 namedinference::propagate_names(outputs_[output_idx], names);
3352 }
3353 // super must happen after, so that downstream can use maybe_get_output
3354 // to retrieve the output
3355 at::meta::structured_clamp_min_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3356 }
3357 void set_output_raw_strided(
3358 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3359 TensorOptions options, DimnameList names
3360 ) override {
3361 auto current_device = guard_.current_device();
3362 if (C10_UNLIKELY(current_device.has_value())) {
3363 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3364 "structured kernels don't support multi-device outputs");
3365 } else {
3366 guard_.reset_device(options.device());
3367 }
3368 const auto& out = outputs_[output_idx].get();
3369 check_inplace(out, sizes, options);
3370 if (!names.empty()) {
3371 namedinference::propagate_names(outputs_[output_idx], names);
3372 }
3373 // super must happen after, so that downstream can use maybe_get_output
3374 // to retrieve the output
3375 at::meta::structured_clamp_min_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3376 }
3377 const Tensor& maybe_get_output(int64_t output_idx) override {
3378 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3379 }
3380 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3381 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3382 c10::OptionalDeviceGuard guard_;
3383};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_clamp_min__Tensor(at::Tensor & self, const at::Tensor & min) {
  structured_clamp_min_Tensor_default_backend_inplace op(self);
  op.meta(self, min);
  at::clamp_min_outf(self, min, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__copy(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::copy(self, src, non_blocking);
}
} // anonymous namespace
3398struct structured_cos_default_backend_functional final : public at::meta::structured_cos {
3399 void set_output_strided(
3400 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3401 TensorOptions options, DimnameList names
3402 ) override {
3403 auto current_device = guard_.current_device();
3404 if (C10_UNLIKELY(current_device.has_value())) {
3405 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3406 "structured kernels don't support multi-device outputs");
3407 } else {
3408 guard_.reset_device(options.device());
3409 }
3410 outputs_[output_idx] = create_out(sizes, strides, options);
3411 if (!names.empty()) {
3412 namedinference::propagate_names(*outputs_[output_idx], names);
3413 }
3414 // super must happen after, so that downstream can use maybe_get_output
3415 // to retrieve the output
3416 at::meta::structured_cos::set_output_raw_strided(output_idx, sizes, strides, options, names);
3417 }
3418 void set_output_raw_strided(
3419 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3420 TensorOptions options, DimnameList names
3421 ) override {
3422 auto current_device = guard_.current_device();
3423 if (C10_UNLIKELY(current_device.has_value())) {
3424 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3425 "structured kernels don't support multi-device outputs");
3426 } else {
3427 guard_.reset_device(options.device());
3428 }
3429 outputs_[output_idx] = create_out(sizes, strides, options);
3430 if (!names.empty()) {
3431 namedinference::propagate_names(*outputs_[output_idx], names);
3432 }
3433 // super must happen after, so that downstream can use maybe_get_output
3434 // to retrieve the output
3435 at::meta::structured_cos::set_output_raw_strided(output_idx, sizes, strides, options, names);
3436 }
3437 const Tensor& maybe_get_output(int64_t output_idx) override {
3438 return *outputs_[output_idx];
3439 }
3440 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3441 c10::OptionalDeviceGuard guard_;
3442};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_cos(const at::Tensor & self) {
  structured_cos_default_backend_functional op;
  op.meta(self);
  at::cos_outf(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
3449struct structured_cos_default_backend_inplace final : public at::meta::structured_cos {
3450 structured_cos_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3451 void set_output_strided(
3452 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3453 TensorOptions options, DimnameList names
3454 ) override {
3455 auto current_device = guard_.current_device();
3456 if (C10_UNLIKELY(current_device.has_value())) {
3457 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3458 "structured kernels don't support multi-device outputs");
3459 } else {
3460 guard_.reset_device(options.device());
3461 }
3462 const auto& out = outputs_[output_idx].get();
3463 check_inplace(out, sizes, options);
3464 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3465 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3466 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3467 }
3468 if (!names.empty()) {
3469 namedinference::propagate_names(outputs_[output_idx], names);
3470 }
3471 // super must happen after, so that downstream can use maybe_get_output
3472 // to retrieve the output
3473 at::meta::structured_cos::set_output_raw_strided(output_idx, sizes, strides, options, names);
3474 }
3475 void set_output_raw_strided(
3476 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3477 TensorOptions options, DimnameList names
3478 ) override {
3479 auto current_device = guard_.current_device();
3480 if (C10_UNLIKELY(current_device.has_value())) {
3481 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3482 "structured kernels don't support multi-device outputs");
3483 } else {
3484 guard_.reset_device(options.device());
3485 }
3486 const auto& out = outputs_[output_idx].get();
3487 check_inplace(out, sizes, options);
3488 if (!names.empty()) {
3489 namedinference::propagate_names(outputs_[output_idx], names);
3490 }
3491 // super must happen after, so that downstream can use maybe_get_output
3492 // to retrieve the output
3493 at::meta::structured_cos::set_output_raw_strided(output_idx, sizes, strides, options, names);
3494 }
3495 const Tensor& maybe_get_output(int64_t output_idx) override {
3496 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3497 }
3498 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3499 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3500 c10::OptionalDeviceGuard guard_;
3501};
3502at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_cos_(at::Tensor & self) {
3503structured_cos_default_backend_inplace op(self);
3504op.meta(self);
3505at::cos_outf(self, op.outputs_[0]);
3506if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3507return self;
3508}
3509struct structured_cosh_default_backend_functional final : public at::meta::structured_cosh {
3510 void set_output_strided(
3511 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3512 TensorOptions options, DimnameList names
3513 ) override {
3514 auto current_device = guard_.current_device();
3515 if (C10_UNLIKELY(current_device.has_value())) {
3516 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3517 "structured kernels don't support multi-device outputs");
3518 } else {
3519 guard_.reset_device(options.device());
3520 }
3521 outputs_[output_idx] = create_out(sizes, strides, options);
3522 if (!names.empty()) {
3523 namedinference::propagate_names(*outputs_[output_idx], names);
3524 }
3525 // super must happen after, so that downstream can use maybe_get_output
3526 // to retrieve the output
3527 at::meta::structured_cosh::set_output_raw_strided(output_idx, sizes, strides, options, names);
3528 }
3529 void set_output_raw_strided(
3530 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3531 TensorOptions options, DimnameList names
3532 ) override {
3533 auto current_device = guard_.current_device();
3534 if (C10_UNLIKELY(current_device.has_value())) {
3535 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3536 "structured kernels don't support multi-device outputs");
3537 } else {
3538 guard_.reset_device(options.device());
3539 }
3540 outputs_[output_idx] = create_out(sizes, strides, options);
3541 if (!names.empty()) {
3542 namedinference::propagate_names(*outputs_[output_idx], names);
3543 }
3544 // super must happen after, so that downstream can use maybe_get_output
3545 // to retrieve the output
3546 at::meta::structured_cosh::set_output_raw_strided(output_idx, sizes, strides, options, names);
3547 }
3548 const Tensor& maybe_get_output(int64_t output_idx) override {
3549 return *outputs_[output_idx];
3550 }
3551 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3552 c10::OptionalDeviceGuard guard_;
3553};
3554at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_cosh(const at::Tensor & self) {
3555structured_cosh_default_backend_functional op;
3556op.meta(self);
3557at::cosh_outf(self, *op.outputs_[0]);
3558return std::move(op.outputs_[0]).take();
3559}
3560struct structured_cosh_default_backend_inplace final : public at::meta::structured_cosh {
3561 structured_cosh_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3562 void set_output_strided(
3563 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3564 TensorOptions options, DimnameList names
3565 ) override {
3566 auto current_device = guard_.current_device();
3567 if (C10_UNLIKELY(current_device.has_value())) {
3568 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3569 "structured kernels don't support multi-device outputs");
3570 } else {
3571 guard_.reset_device(options.device());
3572 }
3573 const auto& out = outputs_[output_idx].get();
3574 check_inplace(out, sizes, options);
3575 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3576 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3577 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3578 }
3579 if (!names.empty()) {
3580 namedinference::propagate_names(outputs_[output_idx], names);
3581 }
3582 // super must happen after, so that downstream can use maybe_get_output
3583 // to retrieve the output
3584 at::meta::structured_cosh::set_output_raw_strided(output_idx, sizes, strides, options, names);
3585 }
3586 void set_output_raw_strided(
3587 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3588 TensorOptions options, DimnameList names
3589 ) override {
3590 auto current_device = guard_.current_device();
3591 if (C10_UNLIKELY(current_device.has_value())) {
3592 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3593 "structured kernels don't support multi-device outputs");
3594 } else {
3595 guard_.reset_device(options.device());
3596 }
3597 const auto& out = outputs_[output_idx].get();
3598 check_inplace(out, sizes, options);
3599 if (!names.empty()) {
3600 namedinference::propagate_names(outputs_[output_idx], names);
3601 }
3602 // super must happen after, so that downstream can use maybe_get_output
3603 // to retrieve the output
3604 at::meta::structured_cosh::set_output_raw_strided(output_idx, sizes, strides, options, names);
3605 }
3606 const Tensor& maybe_get_output(int64_t output_idx) override {
3607 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3608 }
3609 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3610 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3611 c10::OptionalDeviceGuard guard_;
3612};
3613at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_cosh_(at::Tensor & self) {
3614structured_cosh_default_backend_inplace op(self);
3615op.meta(self);
3616at::cosh_outf(self, op.outputs_[0]);
3617if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3618return self;
3619}
3620struct structured_cumprod_default_backend_functional final : public at::meta::structured_cumprod {
3621 void set_output_strided(
3622 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3623 TensorOptions options, DimnameList names
3624 ) override {
3625 auto current_device = guard_.current_device();
3626 if (C10_UNLIKELY(current_device.has_value())) {
3627 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3628 "structured kernels don't support multi-device outputs");
3629 } else {
3630 guard_.reset_device(options.device());
3631 }
3632 outputs_[output_idx] = create_out(sizes, strides, options);
3633 if (!names.empty()) {
3634 namedinference::propagate_names(*outputs_[output_idx], names);
3635 }
3636 // super must happen after, so that downstream can use maybe_get_output
3637 // to retrieve the output
3638 }
3639 void set_output_raw_strided(
3640 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3641 TensorOptions options, DimnameList names
3642 ) override {
3643 auto current_device = guard_.current_device();
3644 if (C10_UNLIKELY(current_device.has_value())) {
3645 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3646 "structured kernels don't support multi-device outputs");
3647 } else {
3648 guard_.reset_device(options.device());
3649 }
3650 outputs_[output_idx] = create_out(sizes, strides, options);
3651 if (!names.empty()) {
3652 namedinference::propagate_names(*outputs_[output_idx], names);
3653 }
3654 // super must happen after, so that downstream can use maybe_get_output
3655 // to retrieve the output
3656 }
3657 const Tensor& maybe_get_output(int64_t output_idx) override {
3658 return *outputs_[output_idx];
3659 }
3660 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3661 c10::OptionalDeviceGuard guard_;
3662};
3663at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_cumprod(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
3664structured_cumprod_default_backend_functional op;
3665op.meta(self, dim, dtype);
3666at::cumprod_outf(self, dim, dtype, *op.outputs_[0]);
3667return std::move(op.outputs_[0]).take();
3668}
3669struct structured_cumprod_default_backend_inplace final : public at::meta::structured_cumprod {
3670 structured_cumprod_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3671 void set_output_strided(
3672 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3673 TensorOptions options, DimnameList names
3674 ) override {
3675 auto current_device = guard_.current_device();
3676 if (C10_UNLIKELY(current_device.has_value())) {
3677 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3678 "structured kernels don't support multi-device outputs");
3679 } else {
3680 guard_.reset_device(options.device());
3681 }
3682 const auto& out = outputs_[output_idx].get();
3683 check_inplace(out, sizes, options);
3684 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3685 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3686 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3687 }
3688 if (!names.empty()) {
3689 namedinference::propagate_names(outputs_[output_idx], names);
3690 }
3691 // super must happen after, so that downstream can use maybe_get_output
3692 // to retrieve the output
3693 }
3694 void set_output_raw_strided(
3695 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3696 TensorOptions options, DimnameList names
3697 ) override {
3698 auto current_device = guard_.current_device();
3699 if (C10_UNLIKELY(current_device.has_value())) {
3700 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3701 "structured kernels don't support multi-device outputs");
3702 } else {
3703 guard_.reset_device(options.device());
3704 }
3705 const auto& out = outputs_[output_idx].get();
3706 check_inplace(out, sizes, options);
3707 if (!names.empty()) {
3708 namedinference::propagate_names(outputs_[output_idx], names);
3709 }
3710 // super must happen after, so that downstream can use maybe_get_output
3711 // to retrieve the output
3712 }
3713 const Tensor& maybe_get_output(int64_t output_idx) override {
3714 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3715 }
3716 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3717 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3718 c10::OptionalDeviceGuard guard_;
3719};
3720at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_cumprod_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
3721structured_cumprod_default_backend_inplace op(self);
3722op.meta(self, dim, dtype);
3723at::cumprod_outf(self, dim, dtype, op.outputs_[0]);
3724if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3725return self;
3726}
3727struct structured_cumsum_default_backend_functional final : public at::meta::structured_cumsum {
3728 void set_output_strided(
3729 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3730 TensorOptions options, DimnameList names
3731 ) override {
3732 auto current_device = guard_.current_device();
3733 if (C10_UNLIKELY(current_device.has_value())) {
3734 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3735 "structured kernels don't support multi-device outputs");
3736 } else {
3737 guard_.reset_device(options.device());
3738 }
3739 outputs_[output_idx] = create_out(sizes, strides, options);
3740 if (!names.empty()) {
3741 namedinference::propagate_names(*outputs_[output_idx], names);
3742 }
3743 // super must happen after, so that downstream can use maybe_get_output
3744 // to retrieve the output
3745 }
3746 void set_output_raw_strided(
3747 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3748 TensorOptions options, DimnameList names
3749 ) override {
3750 auto current_device = guard_.current_device();
3751 if (C10_UNLIKELY(current_device.has_value())) {
3752 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3753 "structured kernels don't support multi-device outputs");
3754 } else {
3755 guard_.reset_device(options.device());
3756 }
3757 outputs_[output_idx] = create_out(sizes, strides, options);
3758 if (!names.empty()) {
3759 namedinference::propagate_names(*outputs_[output_idx], names);
3760 }
3761 // super must happen after, so that downstream can use maybe_get_output
3762 // to retrieve the output
3763 }
3764 const Tensor& maybe_get_output(int64_t output_idx) override {
3765 return *outputs_[output_idx];
3766 }
3767 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3768 c10::OptionalDeviceGuard guard_;
3769};
3770at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
3771structured_cumsum_default_backend_functional op;
3772op.meta(self, dim, dtype);
3773at::cumsum_outf(self, dim, dtype, *op.outputs_[0]);
3774return std::move(op.outputs_[0]).take();
3775}
3776struct structured_cumsum_default_backend_inplace final : public at::meta::structured_cumsum {
3777 structured_cumsum_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3778 void set_output_strided(
3779 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3780 TensorOptions options, DimnameList names
3781 ) override {
3782 auto current_device = guard_.current_device();
3783 if (C10_UNLIKELY(current_device.has_value())) {
3784 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3785 "structured kernels don't support multi-device outputs");
3786 } else {
3787 guard_.reset_device(options.device());
3788 }
3789 const auto& out = outputs_[output_idx].get();
3790 check_inplace(out, sizes, options);
3791 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3792 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3793 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3794 }
3795 if (!names.empty()) {
3796 namedinference::propagate_names(outputs_[output_idx], names);
3797 }
3798 // super must happen after, so that downstream can use maybe_get_output
3799 // to retrieve the output
3800 }
3801 void set_output_raw_strided(
3802 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3803 TensorOptions options, DimnameList names
3804 ) override {
3805 auto current_device = guard_.current_device();
3806 if (C10_UNLIKELY(current_device.has_value())) {
3807 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3808 "structured kernels don't support multi-device outputs");
3809 } else {
3810 guard_.reset_device(options.device());
3811 }
3812 const auto& out = outputs_[output_idx].get();
3813 check_inplace(out, sizes, options);
3814 if (!names.empty()) {
3815 namedinference::propagate_names(outputs_[output_idx], names);
3816 }
3817 // super must happen after, so that downstream can use maybe_get_output
3818 // to retrieve the output
3819 }
3820 const Tensor& maybe_get_output(int64_t output_idx) override {
3821 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3822 }
3823 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3824 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3825 c10::OptionalDeviceGuard guard_;
3826};
3827at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_cumsum_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
3828structured_cumsum_default_backend_inplace op(self);
3829op.meta(self, dim, dtype);
3830at::cumsum_outf(self, dim, dtype, op.outputs_[0]);
3831if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3832return self;
3833}
3834namespace {
3835at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__diag_embed(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
3836 // No device check
3837 // DeviceGuard omitted
3838 return at::native::diag_embed(self, offset, dim1, dim2);
3839}
3840} // anonymous namespace
3841struct structured_div_Tensor_default_backend_functional final : public at::meta::structured_div_Tensor {
3842 void set_output_strided(
3843 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3844 TensorOptions options, DimnameList names
3845 ) override {
3846 auto current_device = guard_.current_device();
3847 if (C10_UNLIKELY(current_device.has_value())) {
3848 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3849 "structured kernels don't support multi-device outputs");
3850 } else {
3851 guard_.reset_device(options.device());
3852 }
3853 outputs_[output_idx] = create_out(sizes, strides, options);
3854 if (!names.empty()) {
3855 namedinference::propagate_names(*outputs_[output_idx], names);
3856 }
3857 // super must happen after, so that downstream can use maybe_get_output
3858 // to retrieve the output
3859 at::meta::structured_div_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3860 }
3861 void set_output_raw_strided(
3862 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3863 TensorOptions options, DimnameList names
3864 ) override {
3865 auto current_device = guard_.current_device();
3866 if (C10_UNLIKELY(current_device.has_value())) {
3867 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3868 "structured kernels don't support multi-device outputs");
3869 } else {
3870 guard_.reset_device(options.device());
3871 }
3872 outputs_[output_idx] = create_out(sizes, strides, options);
3873 if (!names.empty()) {
3874 namedinference::propagate_names(*outputs_[output_idx], names);
3875 }
3876 // super must happen after, so that downstream can use maybe_get_output
3877 // to retrieve the output
3878 at::meta::structured_div_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3879 }
3880 const Tensor& maybe_get_output(int64_t output_idx) override {
3881 return *outputs_[output_idx];
3882 }
3883 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3884 c10::OptionalDeviceGuard guard_;
3885};
3886at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_div_Tensor(const at::Tensor & self, const at::Tensor & other) {
3887structured_div_Tensor_default_backend_functional op;
3888op.meta(self, other);
3889at::div_outf(self, other, *op.outputs_[0]);
3890return std::move(op.outputs_[0]).take();
3891}
3892struct structured_div_Tensor_default_backend_inplace final : public at::meta::structured_div_Tensor {
3893 structured_div_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3894 void set_output_strided(
3895 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3896 TensorOptions options, DimnameList names
3897 ) override {
3898 auto current_device = guard_.current_device();
3899 if (C10_UNLIKELY(current_device.has_value())) {
3900 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3901 "structured kernels don't support multi-device outputs");
3902 } else {
3903 guard_.reset_device(options.device());
3904 }
3905 const auto& out = outputs_[output_idx].get();
3906 check_inplace(out, sizes, options);
3907 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3908 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3909 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3910 }
3911 if (!names.empty()) {
3912 namedinference::propagate_names(outputs_[output_idx], names);
3913 }
3914 // super must happen after, so that downstream can use maybe_get_output
3915 // to retrieve the output
3916 at::meta::structured_div_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3917 }
3918 void set_output_raw_strided(
3919 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3920 TensorOptions options, DimnameList names
3921 ) override {
3922 auto current_device = guard_.current_device();
3923 if (C10_UNLIKELY(current_device.has_value())) {
3924 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3925 "structured kernels don't support multi-device outputs");
3926 } else {
3927 guard_.reset_device(options.device());
3928 }
3929 const auto& out = outputs_[output_idx].get();
3930 check_inplace(out, sizes, options);
3931 if (!names.empty()) {
3932 namedinference::propagate_names(outputs_[output_idx], names);
3933 }
3934 // super must happen after, so that downstream can use maybe_get_output
3935 // to retrieve the output
3936 at::meta::structured_div_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
3937 }
3938 const Tensor& maybe_get_output(int64_t output_idx) override {
3939 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3940 }
3941 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3942 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3943 c10::OptionalDeviceGuard guard_;
3944};
3945at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_div__Tensor(at::Tensor & self, const at::Tensor & other) {
3946structured_div_Tensor_default_backend_inplace op(self);
3947op.meta(self, other);
3948at::div_outf(self, other, op.outputs_[0]);
3949if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3950return self;
3951}
3952struct structured_div_Tensor_mode_default_backend_functional final : public at::meta::structured_div_Tensor_mode {
3953 void set_output_strided(
3954 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3955 TensorOptions options, DimnameList names
3956 ) override {
3957 auto current_device = guard_.current_device();
3958 if (C10_UNLIKELY(current_device.has_value())) {
3959 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3960 "structured kernels don't support multi-device outputs");
3961 } else {
3962 guard_.reset_device(options.device());
3963 }
3964 outputs_[output_idx] = create_out(sizes, strides, options);
3965 if (!names.empty()) {
3966 namedinference::propagate_names(*outputs_[output_idx], names);
3967 }
3968 // super must happen after, so that downstream can use maybe_get_output
3969 // to retrieve the output
3970 at::meta::structured_div_Tensor_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
3971 }
3972 void set_output_raw_strided(
3973 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3974 TensorOptions options, DimnameList names
3975 ) override {
3976 auto current_device = guard_.current_device();
3977 if (C10_UNLIKELY(current_device.has_value())) {
3978 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
3979 "structured kernels don't support multi-device outputs");
3980 } else {
3981 guard_.reset_device(options.device());
3982 }
3983 outputs_[output_idx] = create_out(sizes, strides, options);
3984 if (!names.empty()) {
3985 namedinference::propagate_names(*outputs_[output_idx], names);
3986 }
3987 // super must happen after, so that downstream can use maybe_get_output
3988 // to retrieve the output
3989 at::meta::structured_div_Tensor_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
3990 }
3991 const Tensor& maybe_get_output(int64_t output_idx) override {
3992 return *outputs_[output_idx];
3993 }
3994 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3995 c10::OptionalDeviceGuard guard_;
3996};
3997at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_div_Tensor_mode(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
3998structured_div_Tensor_mode_default_backend_functional op;
3999op.meta(self, other, rounding_mode);
4000at::div_outf(self, other, rounding_mode, *op.outputs_[0]);
4001return std::move(op.outputs_[0]).take();
4002}
4003struct structured_div_Tensor_mode_default_backend_inplace final : public at::meta::structured_div_Tensor_mode {
4004 structured_div_Tensor_mode_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4005 void set_output_strided(
4006 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4007 TensorOptions options, DimnameList names
4008 ) override {
4009 auto current_device = guard_.current_device();
4010 if (C10_UNLIKELY(current_device.has_value())) {
4011 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4012 "structured kernels don't support multi-device outputs");
4013 } else {
4014 guard_.reset_device(options.device());
4015 }
4016 const auto& out = outputs_[output_idx].get();
4017 check_inplace(out, sizes, options);
4018 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4019 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4020 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4021 }
4022 if (!names.empty()) {
4023 namedinference::propagate_names(outputs_[output_idx], names);
4024 }
4025 // super must happen after, so that downstream can use maybe_get_output
4026 // to retrieve the output
4027 at::meta::structured_div_Tensor_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
4028 }
4029 void set_output_raw_strided(
4030 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4031 TensorOptions options, DimnameList names
4032 ) override {
4033 auto current_device = guard_.current_device();
4034 if (C10_UNLIKELY(current_device.has_value())) {
4035 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4036 "structured kernels don't support multi-device outputs");
4037 } else {
4038 guard_.reset_device(options.device());
4039 }
4040 const auto& out = outputs_[output_idx].get();
4041 check_inplace(out, sizes, options);
4042 if (!names.empty()) {
4043 namedinference::propagate_names(outputs_[output_idx], names);
4044 }
4045 // super must happen after, so that downstream can use maybe_get_output
4046 // to retrieve the output
4047 at::meta::structured_div_Tensor_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
4048 }
4049 const Tensor& maybe_get_output(int64_t output_idx) override {
4050 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4051 }
4052 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4053 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4054 c10::OptionalDeviceGuard guard_;
4055};
4056at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_div__Tensor_mode(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
4057structured_div_Tensor_mode_default_backend_inplace op(self);
4058op.meta(self, other, rounding_mode);
4059at::div_outf(self, other, rounding_mode, op.outputs_[0]);
4060if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4061return self;
4062}
4063namespace {
4064at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__new_empty_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
4065 // No device check
4066 // DeviceGuard omitted
4067 return at::native::new_empty_strided_symint(self, size, stride, dtype, layout, device, pin_memory);
4068}
4069} // anonymous namespace
4070struct structured_erf_default_backend_functional final : public at::meta::structured_erf {
4071 void set_output_strided(
4072 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4073 TensorOptions options, DimnameList names
4074 ) override {
4075 auto current_device = guard_.current_device();
4076 if (C10_UNLIKELY(current_device.has_value())) {
4077 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4078 "structured kernels don't support multi-device outputs");
4079 } else {
4080 guard_.reset_device(options.device());
4081 }
4082 outputs_[output_idx] = create_out(sizes, strides, options);
4083 if (!names.empty()) {
4084 namedinference::propagate_names(*outputs_[output_idx], names);
4085 }
4086 // super must happen after, so that downstream can use maybe_get_output
4087 // to retrieve the output
4088 at::meta::structured_erf::set_output_raw_strided(output_idx, sizes, strides, options, names);
4089 }
4090 void set_output_raw_strided(
4091 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4092 TensorOptions options, DimnameList names
4093 ) override {
4094 auto current_device = guard_.current_device();
4095 if (C10_UNLIKELY(current_device.has_value())) {
4096 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4097 "structured kernels don't support multi-device outputs");
4098 } else {
4099 guard_.reset_device(options.device());
4100 }
4101 outputs_[output_idx] = create_out(sizes, strides, options);
4102 if (!names.empty()) {
4103 namedinference::propagate_names(*outputs_[output_idx], names);
4104 }
4105 // super must happen after, so that downstream can use maybe_get_output
4106 // to retrieve the output
4107 at::meta::structured_erf::set_output_raw_strided(output_idx, sizes, strides, options, names);
4108 }
4109 const Tensor& maybe_get_output(int64_t output_idx) override {
4110 return *outputs_[output_idx];
4111 }
4112 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4113 c10::OptionalDeviceGuard guard_;
4114};
4115at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_erf(const at::Tensor & self) {
4116structured_erf_default_backend_functional op;
4117op.meta(self);
4118at::erf_outf(self, *op.outputs_[0]);
4119return std::move(op.outputs_[0]).take();
4120}
4121struct structured_erf_default_backend_inplace final : public at::meta::structured_erf {
4122 structured_erf_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4123 void set_output_strided(
4124 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4125 TensorOptions options, DimnameList names
4126 ) override {
4127 auto current_device = guard_.current_device();
4128 if (C10_UNLIKELY(current_device.has_value())) {
4129 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4130 "structured kernels don't support multi-device outputs");
4131 } else {
4132 guard_.reset_device(options.device());
4133 }
4134 const auto& out = outputs_[output_idx].get();
4135 check_inplace(out, sizes, options);
4136 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4137 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4138 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4139 }
4140 if (!names.empty()) {
4141 namedinference::propagate_names(outputs_[output_idx], names);
4142 }
4143 // super must happen after, so that downstream can use maybe_get_output
4144 // to retrieve the output
4145 at::meta::structured_erf::set_output_raw_strided(output_idx, sizes, strides, options, names);
4146 }
4147 void set_output_raw_strided(
4148 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4149 TensorOptions options, DimnameList names
4150 ) override {
4151 auto current_device = guard_.current_device();
4152 if (C10_UNLIKELY(current_device.has_value())) {
4153 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4154 "structured kernels don't support multi-device outputs");
4155 } else {
4156 guard_.reset_device(options.device());
4157 }
4158 const auto& out = outputs_[output_idx].get();
4159 check_inplace(out, sizes, options);
4160 if (!names.empty()) {
4161 namedinference::propagate_names(outputs_[output_idx], names);
4162 }
4163 // super must happen after, so that downstream can use maybe_get_output
4164 // to retrieve the output
4165 at::meta::structured_erf::set_output_raw_strided(output_idx, sizes, strides, options, names);
4166 }
4167 const Tensor& maybe_get_output(int64_t output_idx) override {
4168 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4169 }
4170 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4171 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4172 c10::OptionalDeviceGuard guard_;
4173};
4174at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_erf_(at::Tensor & self) {
4175structured_erf_default_backend_inplace op(self);
4176op.meta(self);
4177at::erf_outf(self, op.outputs_[0]);
4178if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4179return self;
4180}
4181struct structured_erfc_default_backend_functional final : public at::meta::structured_erfc {
4182 void set_output_strided(
4183 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4184 TensorOptions options, DimnameList names
4185 ) override {
4186 auto current_device = guard_.current_device();
4187 if (C10_UNLIKELY(current_device.has_value())) {
4188 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4189 "structured kernels don't support multi-device outputs");
4190 } else {
4191 guard_.reset_device(options.device());
4192 }
4193 outputs_[output_idx] = create_out(sizes, strides, options);
4194 if (!names.empty()) {
4195 namedinference::propagate_names(*outputs_[output_idx], names);
4196 }
4197 // super must happen after, so that downstream can use maybe_get_output
4198 // to retrieve the output
4199 at::meta::structured_erfc::set_output_raw_strided(output_idx, sizes, strides, options, names);
4200 }
4201 void set_output_raw_strided(
4202 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4203 TensorOptions options, DimnameList names
4204 ) override {
4205 auto current_device = guard_.current_device();
4206 if (C10_UNLIKELY(current_device.has_value())) {
4207 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4208 "structured kernels don't support multi-device outputs");
4209 } else {
4210 guard_.reset_device(options.device());
4211 }
4212 outputs_[output_idx] = create_out(sizes, strides, options);
4213 if (!names.empty()) {
4214 namedinference::propagate_names(*outputs_[output_idx], names);
4215 }
4216 // super must happen after, so that downstream can use maybe_get_output
4217 // to retrieve the output
4218 at::meta::structured_erfc::set_output_raw_strided(output_idx, sizes, strides, options, names);
4219 }
4220 const Tensor& maybe_get_output(int64_t output_idx) override {
4221 return *outputs_[output_idx];
4222 }
4223 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4224 c10::OptionalDeviceGuard guard_;
4225};
4226at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_erfc(const at::Tensor & self) {
4227structured_erfc_default_backend_functional op;
4228op.meta(self);
4229at::erfc_outf(self, *op.outputs_[0]);
4230return std::move(op.outputs_[0]).take();
4231}
4232struct structured_erfc_default_backend_inplace final : public at::meta::structured_erfc {
4233 structured_erfc_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4234 void set_output_strided(
4235 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4236 TensorOptions options, DimnameList names
4237 ) override {
4238 auto current_device = guard_.current_device();
4239 if (C10_UNLIKELY(current_device.has_value())) {
4240 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4241 "structured kernels don't support multi-device outputs");
4242 } else {
4243 guard_.reset_device(options.device());
4244 }
4245 const auto& out = outputs_[output_idx].get();
4246 check_inplace(out, sizes, options);
4247 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4248 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4249 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4250 }
4251 if (!names.empty()) {
4252 namedinference::propagate_names(outputs_[output_idx], names);
4253 }
4254 // super must happen after, so that downstream can use maybe_get_output
4255 // to retrieve the output
4256 at::meta::structured_erfc::set_output_raw_strided(output_idx, sizes, strides, options, names);
4257 }
4258 void set_output_raw_strided(
4259 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4260 TensorOptions options, DimnameList names
4261 ) override {
4262 auto current_device = guard_.current_device();
4263 if (C10_UNLIKELY(current_device.has_value())) {
4264 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4265 "structured kernels don't support multi-device outputs");
4266 } else {
4267 guard_.reset_device(options.device());
4268 }
4269 const auto& out = outputs_[output_idx].get();
4270 check_inplace(out, sizes, options);
4271 if (!names.empty()) {
4272 namedinference::propagate_names(outputs_[output_idx], names);
4273 }
4274 // super must happen after, so that downstream can use maybe_get_output
4275 // to retrieve the output
4276 at::meta::structured_erfc::set_output_raw_strided(output_idx, sizes, strides, options, names);
4277 }
4278 const Tensor& maybe_get_output(int64_t output_idx) override {
4279 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4280 }
4281 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4282 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4283 c10::OptionalDeviceGuard guard_;
4284};
4285at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_erfc_(at::Tensor & self) {
4286structured_erfc_default_backend_inplace op(self);
4287op.meta(self);
4288at::erfc_outf(self, op.outputs_[0]);
4289if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4290return self;
4291}
4292struct structured_exp_default_backend_functional final : public at::meta::structured_exp {
4293 void set_output_strided(
4294 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4295 TensorOptions options, DimnameList names
4296 ) override {
4297 auto current_device = guard_.current_device();
4298 if (C10_UNLIKELY(current_device.has_value())) {
4299 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4300 "structured kernels don't support multi-device outputs");
4301 } else {
4302 guard_.reset_device(options.device());
4303 }
4304 outputs_[output_idx] = create_out(sizes, strides, options);
4305 if (!names.empty()) {
4306 namedinference::propagate_names(*outputs_[output_idx], names);
4307 }
4308 // super must happen after, so that downstream can use maybe_get_output
4309 // to retrieve the output
4310 at::meta::structured_exp::set_output_raw_strided(output_idx, sizes, strides, options, names);
4311 }
4312 void set_output_raw_strided(
4313 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4314 TensorOptions options, DimnameList names
4315 ) override {
4316 auto current_device = guard_.current_device();
4317 if (C10_UNLIKELY(current_device.has_value())) {
4318 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4319 "structured kernels don't support multi-device outputs");
4320 } else {
4321 guard_.reset_device(options.device());
4322 }
4323 outputs_[output_idx] = create_out(sizes, strides, options);
4324 if (!names.empty()) {
4325 namedinference::propagate_names(*outputs_[output_idx], names);
4326 }
4327 // super must happen after, so that downstream can use maybe_get_output
4328 // to retrieve the output
4329 at::meta::structured_exp::set_output_raw_strided(output_idx, sizes, strides, options, names);
4330 }
4331 const Tensor& maybe_get_output(int64_t output_idx) override {
4332 return *outputs_[output_idx];
4333 }
4334 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4335 c10::OptionalDeviceGuard guard_;
4336};
4337at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_exp(const at::Tensor & self) {
4338structured_exp_default_backend_functional op;
4339op.meta(self);
4340at::exp_outf(self, *op.outputs_[0]);
4341return std::move(op.outputs_[0]).take();
4342}
4343struct structured_exp_default_backend_inplace final : public at::meta::structured_exp {
4344 structured_exp_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4345 void set_output_strided(
4346 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4347 TensorOptions options, DimnameList names
4348 ) override {
4349 auto current_device = guard_.current_device();
4350 if (C10_UNLIKELY(current_device.has_value())) {
4351 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4352 "structured kernels don't support multi-device outputs");
4353 } else {
4354 guard_.reset_device(options.device());
4355 }
4356 const auto& out = outputs_[output_idx].get();
4357 check_inplace(out, sizes, options);
4358 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4359 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4360 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4361 }
4362 if (!names.empty()) {
4363 namedinference::propagate_names(outputs_[output_idx], names);
4364 }
4365 // super must happen after, so that downstream can use maybe_get_output
4366 // to retrieve the output
4367 at::meta::structured_exp::set_output_raw_strided(output_idx, sizes, strides, options, names);
4368 }
4369 void set_output_raw_strided(
4370 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4371 TensorOptions options, DimnameList names
4372 ) override {
4373 auto current_device = guard_.current_device();
4374 if (C10_UNLIKELY(current_device.has_value())) {
4375 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4376 "structured kernels don't support multi-device outputs");
4377 } else {
4378 guard_.reset_device(options.device());
4379 }
4380 const auto& out = outputs_[output_idx].get();
4381 check_inplace(out, sizes, options);
4382 if (!names.empty()) {
4383 namedinference::propagate_names(outputs_[output_idx], names);
4384 }
4385 // super must happen after, so that downstream can use maybe_get_output
4386 // to retrieve the output
4387 at::meta::structured_exp::set_output_raw_strided(output_idx, sizes, strides, options, names);
4388 }
4389 const Tensor& maybe_get_output(int64_t output_idx) override {
4390 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4391 }
4392 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4393 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4394 c10::OptionalDeviceGuard guard_;
4395};
4396at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_exp_(at::Tensor & self) {
4397structured_exp_default_backend_inplace op(self);
4398op.meta(self);
4399at::exp_outf(self, op.outputs_[0]);
4400if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4401return self;
4402}
4403struct structured_exp2_default_backend_functional final : public at::meta::structured_exp2 {
4404 void set_output_strided(
4405 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4406 TensorOptions options, DimnameList names
4407 ) override {
4408 auto current_device = guard_.current_device();
4409 if (C10_UNLIKELY(current_device.has_value())) {
4410 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4411 "structured kernels don't support multi-device outputs");
4412 } else {
4413 guard_.reset_device(options.device());
4414 }
4415 outputs_[output_idx] = create_out(sizes, strides, options);
4416 if (!names.empty()) {
4417 namedinference::propagate_names(*outputs_[output_idx], names);
4418 }
4419 // super must happen after, so that downstream can use maybe_get_output
4420 // to retrieve the output
4421 at::meta::structured_exp2::set_output_raw_strided(output_idx, sizes, strides, options, names);
4422 }
4423 void set_output_raw_strided(
4424 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4425 TensorOptions options, DimnameList names
4426 ) override {
4427 auto current_device = guard_.current_device();
4428 if (C10_UNLIKELY(current_device.has_value())) {
4429 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4430 "structured kernels don't support multi-device outputs");
4431 } else {
4432 guard_.reset_device(options.device());
4433 }
4434 outputs_[output_idx] = create_out(sizes, strides, options);
4435 if (!names.empty()) {
4436 namedinference::propagate_names(*outputs_[output_idx], names);
4437 }
4438 // super must happen after, so that downstream can use maybe_get_output
4439 // to retrieve the output
4440 at::meta::structured_exp2::set_output_raw_strided(output_idx, sizes, strides, options, names);
4441 }
4442 const Tensor& maybe_get_output(int64_t output_idx) override {
4443 return *outputs_[output_idx];
4444 }
4445 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4446 c10::OptionalDeviceGuard guard_;
4447};
4448at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_exp2(const at::Tensor & self) {
4449structured_exp2_default_backend_functional op;
4450op.meta(self);
4451at::exp2_outf(self, *op.outputs_[0]);
4452return std::move(op.outputs_[0]).take();
4453}
4454struct structured_exp2_default_backend_inplace final : public at::meta::structured_exp2 {
4455 structured_exp2_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4456 void set_output_strided(
4457 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4458 TensorOptions options, DimnameList names
4459 ) override {
4460 auto current_device = guard_.current_device();
4461 if (C10_UNLIKELY(current_device.has_value())) {
4462 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4463 "structured kernels don't support multi-device outputs");
4464 } else {
4465 guard_.reset_device(options.device());
4466 }
4467 const auto& out = outputs_[output_idx].get();
4468 check_inplace(out, sizes, options);
4469 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4470 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4471 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4472 }
4473 if (!names.empty()) {
4474 namedinference::propagate_names(outputs_[output_idx], names);
4475 }
4476 // super must happen after, so that downstream can use maybe_get_output
4477 // to retrieve the output
4478 at::meta::structured_exp2::set_output_raw_strided(output_idx, sizes, strides, options, names);
4479 }
4480 void set_output_raw_strided(
4481 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4482 TensorOptions options, DimnameList names
4483 ) override {
4484 auto current_device = guard_.current_device();
4485 if (C10_UNLIKELY(current_device.has_value())) {
4486 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4487 "structured kernels don't support multi-device outputs");
4488 } else {
4489 guard_.reset_device(options.device());
4490 }
4491 const auto& out = outputs_[output_idx].get();
4492 check_inplace(out, sizes, options);
4493 if (!names.empty()) {
4494 namedinference::propagate_names(outputs_[output_idx], names);
4495 }
4496 // super must happen after, so that downstream can use maybe_get_output
4497 // to retrieve the output
4498 at::meta::structured_exp2::set_output_raw_strided(output_idx, sizes, strides, options, names);
4499 }
4500 const Tensor& maybe_get_output(int64_t output_idx) override {
4501 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4502 }
4503 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4504 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4505 c10::OptionalDeviceGuard guard_;
4506};
4507at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_exp2_(at::Tensor & self) {
4508structured_exp2_default_backend_inplace op(self);
4509op.meta(self);
4510at::exp2_outf(self, op.outputs_[0]);
4511if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4512return self;
4513}
4514struct structured_expm1_default_backend_functional final : public at::meta::structured_expm1 {
4515 void set_output_strided(
4516 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4517 TensorOptions options, DimnameList names
4518 ) override {
4519 auto current_device = guard_.current_device();
4520 if (C10_UNLIKELY(current_device.has_value())) {
4521 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4522 "structured kernels don't support multi-device outputs");
4523 } else {
4524 guard_.reset_device(options.device());
4525 }
4526 outputs_[output_idx] = create_out(sizes, strides, options);
4527 if (!names.empty()) {
4528 namedinference::propagate_names(*outputs_[output_idx], names);
4529 }
4530 // super must happen after, so that downstream can use maybe_get_output
4531 // to retrieve the output
4532 at::meta::structured_expm1::set_output_raw_strided(output_idx, sizes, strides, options, names);
4533 }
4534 void set_output_raw_strided(
4535 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4536 TensorOptions options, DimnameList names
4537 ) override {
4538 auto current_device = guard_.current_device();
4539 if (C10_UNLIKELY(current_device.has_value())) {
4540 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4541 "structured kernels don't support multi-device outputs");
4542 } else {
4543 guard_.reset_device(options.device());
4544 }
4545 outputs_[output_idx] = create_out(sizes, strides, options);
4546 if (!names.empty()) {
4547 namedinference::propagate_names(*outputs_[output_idx], names);
4548 }
4549 // super must happen after, so that downstream can use maybe_get_output
4550 // to retrieve the output
4551 at::meta::structured_expm1::set_output_raw_strided(output_idx, sizes, strides, options, names);
4552 }
4553 const Tensor& maybe_get_output(int64_t output_idx) override {
4554 return *outputs_[output_idx];
4555 }
4556 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4557 c10::OptionalDeviceGuard guard_;
4558};
4559at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_expm1(const at::Tensor & self) {
4560structured_expm1_default_backend_functional op;
4561op.meta(self);
4562at::expm1_outf(self, *op.outputs_[0]);
4563return std::move(op.outputs_[0]).take();
4564}
4565struct structured_expm1_default_backend_inplace final : public at::meta::structured_expm1 {
4566 structured_expm1_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4567 void set_output_strided(
4568 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4569 TensorOptions options, DimnameList names
4570 ) override {
4571 auto current_device = guard_.current_device();
4572 if (C10_UNLIKELY(current_device.has_value())) {
4573 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4574 "structured kernels don't support multi-device outputs");
4575 } else {
4576 guard_.reset_device(options.device());
4577 }
4578 const auto& out = outputs_[output_idx].get();
4579 check_inplace(out, sizes, options);
4580 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4581 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4582 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4583 }
4584 if (!names.empty()) {
4585 namedinference::propagate_names(outputs_[output_idx], names);
4586 }
4587 // super must happen after, so that downstream can use maybe_get_output
4588 // to retrieve the output
4589 at::meta::structured_expm1::set_output_raw_strided(output_idx, sizes, strides, options, names);
4590 }
4591 void set_output_raw_strided(
4592 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4593 TensorOptions options, DimnameList names
4594 ) override {
4595 auto current_device = guard_.current_device();
4596 if (C10_UNLIKELY(current_device.has_value())) {
4597 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4598 "structured kernels don't support multi-device outputs");
4599 } else {
4600 guard_.reset_device(options.device());
4601 }
4602 const auto& out = outputs_[output_idx].get();
4603 check_inplace(out, sizes, options);
4604 if (!names.empty()) {
4605 namedinference::propagate_names(outputs_[output_idx], names);
4606 }
4607 // super must happen after, so that downstream can use maybe_get_output
4608 // to retrieve the output
4609 at::meta::structured_expm1::set_output_raw_strided(output_idx, sizes, strides, options, names);
4610 }
4611 const Tensor& maybe_get_output(int64_t output_idx) override {
4612 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4613 }
4614 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4615 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4616 c10::OptionalDeviceGuard guard_;
4617};
4618at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_expm1_(at::Tensor & self) {
4619structured_expm1_default_backend_inplace op(self);
4620op.meta(self);
4621at::expm1_outf(self, op.outputs_[0]);
4622if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4623return self;
4624}
4625struct structured_floor_default_backend_functional final : public at::meta::structured_floor {
4626 void set_output_strided(
4627 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4628 TensorOptions options, DimnameList names
4629 ) override {
4630 auto current_device = guard_.current_device();
4631 if (C10_UNLIKELY(current_device.has_value())) {
4632 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4633 "structured kernels don't support multi-device outputs");
4634 } else {
4635 guard_.reset_device(options.device());
4636 }
4637 outputs_[output_idx] = create_out(sizes, strides, options);
4638 if (!names.empty()) {
4639 namedinference::propagate_names(*outputs_[output_idx], names);
4640 }
4641 // super must happen after, so that downstream can use maybe_get_output
4642 // to retrieve the output
4643 at::meta::structured_floor::set_output_raw_strided(output_idx, sizes, strides, options, names);
4644 }
4645 void set_output_raw_strided(
4646 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4647 TensorOptions options, DimnameList names
4648 ) override {
4649 auto current_device = guard_.current_device();
4650 if (C10_UNLIKELY(current_device.has_value())) {
4651 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4652 "structured kernels don't support multi-device outputs");
4653 } else {
4654 guard_.reset_device(options.device());
4655 }
4656 outputs_[output_idx] = create_out(sizes, strides, options);
4657 if (!names.empty()) {
4658 namedinference::propagate_names(*outputs_[output_idx], names);
4659 }
4660 // super must happen after, so that downstream can use maybe_get_output
4661 // to retrieve the output
4662 at::meta::structured_floor::set_output_raw_strided(output_idx, sizes, strides, options, names);
4663 }
4664 const Tensor& maybe_get_output(int64_t output_idx) override {
4665 return *outputs_[output_idx];
4666 }
4667 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4668 c10::OptionalDeviceGuard guard_;
4669};
4670at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_floor(const at::Tensor & self) {
4671structured_floor_default_backend_functional op;
4672op.meta(self);
4673at::floor_outf(self, *op.outputs_[0]);
4674return std::move(op.outputs_[0]).take();
4675}
4676struct structured_floor_default_backend_inplace final : public at::meta::structured_floor {
4677 structured_floor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4678 void set_output_strided(
4679 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4680 TensorOptions options, DimnameList names
4681 ) override {
4682 auto current_device = guard_.current_device();
4683 if (C10_UNLIKELY(current_device.has_value())) {
4684 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4685 "structured kernels don't support multi-device outputs");
4686 } else {
4687 guard_.reset_device(options.device());
4688 }
4689 const auto& out = outputs_[output_idx].get();
4690 check_inplace(out, sizes, options);
4691 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4692 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4693 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4694 }
4695 if (!names.empty()) {
4696 namedinference::propagate_names(outputs_[output_idx], names);
4697 }
4698 // super must happen after, so that downstream can use maybe_get_output
4699 // to retrieve the output
4700 at::meta::structured_floor::set_output_raw_strided(output_idx, sizes, strides, options, names);
4701 }
4702 void set_output_raw_strided(
4703 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4704 TensorOptions options, DimnameList names
4705 ) override {
4706 auto current_device = guard_.current_device();
4707 if (C10_UNLIKELY(current_device.has_value())) {
4708 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4709 "structured kernels don't support multi-device outputs");
4710 } else {
4711 guard_.reset_device(options.device());
4712 }
4713 const auto& out = outputs_[output_idx].get();
4714 check_inplace(out, sizes, options);
4715 if (!names.empty()) {
4716 namedinference::propagate_names(outputs_[output_idx], names);
4717 }
4718 // super must happen after, so that downstream can use maybe_get_output
4719 // to retrieve the output
4720 at::meta::structured_floor::set_output_raw_strided(output_idx, sizes, strides, options, names);
4721 }
4722 const Tensor& maybe_get_output(int64_t output_idx) override {
4723 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4724 }
4725 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4726 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4727 c10::OptionalDeviceGuard guard_;
4728};
4729at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_floor_(at::Tensor & self) {
4730structured_floor_default_backend_inplace op(self);
4731op.meta(self);
4732at::floor_outf(self, op.outputs_[0]);
4733if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4734return self;
4735}
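// NOTE: the in-place variant differs from the functional one in that outputs_
// holds a reference to self, check_inplace verifies self already matches the
// sizes/options computed by meta, and maybe_create_proxy only materializes a
// temporary when self cannot be written directly (for example, when its strides
// do not suit the kernel); in that case the proxy result is copy_()-ed back into
// self after the out= call. Illustrative sketch (hypothetical tensor name):
//
//   at::Tensor x = at::rand({4});
//   x.floor_();                              // routes here; modifies x in place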
4736struct structured_frac_default_backend_functional final : public at::meta::structured_frac {
4737 void set_output_strided(
4738 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4739 TensorOptions options, DimnameList names
4740 ) override {
4741 auto current_device = guard_.current_device();
4742 if (C10_UNLIKELY(current_device.has_value())) {
4743 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4744 "structured kernels don't support multi-device outputs");
4745 } else {
4746 guard_.reset_device(options.device());
4747 }
4748 outputs_[output_idx] = create_out(sizes, strides, options);
4749 if (!names.empty()) {
4750 namedinference::propagate_names(*outputs_[output_idx], names);
4751 }
4752 // super must happen after, so that downstream can use maybe_get_output
4753 // to retrieve the output
4754 at::meta::structured_frac::set_output_raw_strided(output_idx, sizes, strides, options, names);
4755 }
4756 void set_output_raw_strided(
4757 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4758 TensorOptions options, DimnameList names
4759 ) override {
4760 auto current_device = guard_.current_device();
4761 if (C10_UNLIKELY(current_device.has_value())) {
4762 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4763 "structured kernels don't support multi-device outputs");
4764 } else {
4765 guard_.reset_device(options.device());
4766 }
4767 outputs_[output_idx] = create_out(sizes, strides, options);
4768 if (!names.empty()) {
4769 namedinference::propagate_names(*outputs_[output_idx], names);
4770 }
4771 // super must happen after, so that downstream can use maybe_get_output
4772 // to retrieve the output
4773 at::meta::structured_frac::set_output_raw_strided(output_idx, sizes, strides, options, names);
4774 }
4775 const Tensor& maybe_get_output(int64_t output_idx) override {
4776 return *outputs_[output_idx];
4777 }
4778 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4779 c10::OptionalDeviceGuard guard_;
4780};
4781at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_frac(const at::Tensor & self) {
4782structured_frac_default_backend_functional op;
4783op.meta(self);
4784at::frac_outf(self, *op.outputs_[0]);
4785return std::move(op.outputs_[0]).take();
4786}
4787struct structured_frac_default_backend_inplace final : public at::meta::structured_frac {
4788 structured_frac_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4789 void set_output_strided(
4790 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4791 TensorOptions options, DimnameList names
4792 ) override {
4793 auto current_device = guard_.current_device();
4794 if (C10_UNLIKELY(current_device.has_value())) {
4795 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4796 "structured kernels don't support multi-device outputs");
4797 } else {
4798 guard_.reset_device(options.device());
4799 }
4800 const auto& out = outputs_[output_idx].get();
4801 check_inplace(out, sizes, options);
4802 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4803 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4804 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4805 }
4806 if (!names.empty()) {
4807 namedinference::propagate_names(outputs_[output_idx], names);
4808 }
4809 // super must happen after, so that downstream can use maybe_get_output
4810 // to retrieve the output
4811 at::meta::structured_frac::set_output_raw_strided(output_idx, sizes, strides, options, names);
4812 }
4813 void set_output_raw_strided(
4814 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4815 TensorOptions options, DimnameList names
4816 ) override {
4817 auto current_device = guard_.current_device();
4818 if (C10_UNLIKELY(current_device.has_value())) {
4819 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4820 "structured kernels don't support multi-device outputs");
4821 } else {
4822 guard_.reset_device(options.device());
4823 }
4824 const auto& out = outputs_[output_idx].get();
4825 check_inplace(out, sizes, options);
4826 if (!names.empty()) {
4827 namedinference::propagate_names(outputs_[output_idx], names);
4828 }
4829 // super must happen after, so that downstream can use maybe_get_output
4830 // to retrieve the output
4831 at::meta::structured_frac::set_output_raw_strided(output_idx, sizes, strides, options, names);
4832 }
4833 const Tensor& maybe_get_output(int64_t output_idx) override {
4834 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4835 }
4836 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4837 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4838 c10::OptionalDeviceGuard guard_;
4839};
4840at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_frac_(at::Tensor & self) {
4841structured_frac_default_backend_inplace op(self);
4842op.meta(self);
4843at::frac_outf(self, op.outputs_[0]);
4844if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4845return self;
4846}
4847struct structured_gcd_default_backend_functional final : public at::meta::structured_gcd {
4848 void set_output_strided(
4849 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4850 TensorOptions options, DimnameList names
4851 ) override {
4852 auto current_device = guard_.current_device();
4853 if (C10_UNLIKELY(current_device.has_value())) {
4854 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4855 "structured kernels don't support multi-device outputs");
4856 } else {
4857 guard_.reset_device(options.device());
4858 }
4859 outputs_[output_idx] = create_out(sizes, strides, options);
4860 if (!names.empty()) {
4861 namedinference::propagate_names(*outputs_[output_idx], names);
4862 }
4863 // super must happen after, so that downstream can use maybe_get_output
4864 // to retrieve the output
4865 at::meta::structured_gcd::set_output_raw_strided(output_idx, sizes, strides, options, names);
4866 }
4867 void set_output_raw_strided(
4868 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4869 TensorOptions options, DimnameList names
4870 ) override {
4871 auto current_device = guard_.current_device();
4872 if (C10_UNLIKELY(current_device.has_value())) {
4873 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4874 "structured kernels don't support multi-device outputs");
4875 } else {
4876 guard_.reset_device(options.device());
4877 }
4878 outputs_[output_idx] = create_out(sizes, strides, options);
4879 if (!names.empty()) {
4880 namedinference::propagate_names(*outputs_[output_idx], names);
4881 }
4882 // super must happen after, so that downstream can use maybe_get_output
4883 // to retrieve the output
4884 at::meta::structured_gcd::set_output_raw_strided(output_idx, sizes, strides, options, names);
4885 }
4886 const Tensor& maybe_get_output(int64_t output_idx) override {
4887 return *outputs_[output_idx];
4888 }
4889 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4890 c10::OptionalDeviceGuard guard_;
4891};
4892at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_gcd(const at::Tensor & self, const at::Tensor & other) {
4893structured_gcd_default_backend_functional op;
4894op.meta(self, other);
4895at::gcd_outf(self, other, *op.outputs_[0]);
4896return std::move(op.outputs_[0]).take();
4897}
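// NOTE: gcd is the first binary op in this block; the wrapper shape is identical
// to the unary case except that meta(self, other) also performs shape
// broadcasting and dtype checks before the output is allocated, and both
// operands are forwarded to the out= call. Sketch (hypothetical names; integral
// inputs, since gcd is only defined for integer dtypes):
//
//   at::Tensor a = at::arange(1, 7);                  // {1,2,3,4,5,6}, int64
//   at::Tensor g = at::gcd(a, at::full_like(a, 4));   // {1,2,1,4,1,2}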
4898struct structured_gcd_default_backend_inplace final : public at::meta::structured_gcd {
4899 structured_gcd_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4900 void set_output_strided(
4901 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4902 TensorOptions options, DimnameList names
4903 ) override {
4904 auto current_device = guard_.current_device();
4905 if (C10_UNLIKELY(current_device.has_value())) {
4906 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4907 "structured kernels don't support multi-device outputs");
4908 } else {
4909 guard_.reset_device(options.device());
4910 }
4911 const auto& out = outputs_[output_idx].get();
4912 check_inplace(out, sizes, options);
4913 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4914 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4915 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4916 }
4917 if (!names.empty()) {
4918 namedinference::propagate_names(outputs_[output_idx], names);
4919 }
4920 // super must happen after, so that downstream can use maybe_get_output
4921 // to retrieve the output
4922 at::meta::structured_gcd::set_output_raw_strided(output_idx, sizes, strides, options, names);
4923 }
4924 void set_output_raw_strided(
4925 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4926 TensorOptions options, DimnameList names
4927 ) override {
4928 auto current_device = guard_.current_device();
4929 if (C10_UNLIKELY(current_device.has_value())) {
4930 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4931 "structured kernels don't support multi-device outputs");
4932 } else {
4933 guard_.reset_device(options.device());
4934 }
4935 const auto& out = outputs_[output_idx].get();
4936 check_inplace(out, sizes, options);
4937 if (!names.empty()) {
4938 namedinference::propagate_names(outputs_[output_idx], names);
4939 }
4940 // super must happen after, so that downstream can use maybe_get_output
4941 // to retrieve the output
4942 at::meta::structured_gcd::set_output_raw_strided(output_idx, sizes, strides, options, names);
4943 }
4944 const Tensor& maybe_get_output(int64_t output_idx) override {
4945 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4946 }
4947 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4948 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4949 c10::OptionalDeviceGuard guard_;
4950};
4951at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_gcd_(at::Tensor & self, const at::Tensor & other) {
4952structured_gcd_default_backend_inplace op(self);
4953op.meta(self, other);
4954at::gcd_outf(self, other, op.outputs_[0]);
4955if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4956return self;
4957}
4958struct structured_lcm_default_backend_functional final : public at::meta::structured_lcm {
4959 void set_output_strided(
4960 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4961 TensorOptions options, DimnameList names
4962 ) override {
4963 auto current_device = guard_.current_device();
4964 if (C10_UNLIKELY(current_device.has_value())) {
4965 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4966 "structured kernels don't support multi-device outputs");
4967 } else {
4968 guard_.reset_device(options.device());
4969 }
4970 outputs_[output_idx] = create_out(sizes, strides, options);
4971 if (!names.empty()) {
4972 namedinference::propagate_names(*outputs_[output_idx], names);
4973 }
4974 // super must happen after, so that downstream can use maybe_get_output
4975 // to retrieve the output
4976 at::meta::structured_lcm::set_output_raw_strided(output_idx, sizes, strides, options, names);
4977 }
4978 void set_output_raw_strided(
4979 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4980 TensorOptions options, DimnameList names
4981 ) override {
4982 auto current_device = guard_.current_device();
4983 if (C10_UNLIKELY(current_device.has_value())) {
4984 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
4985 "structured kernels don't support multi-device outputs");
4986 } else {
4987 guard_.reset_device(options.device());
4988 }
4989 outputs_[output_idx] = create_out(sizes, strides, options);
4990 if (!names.empty()) {
4991 namedinference::propagate_names(*outputs_[output_idx], names);
4992 }
4993 // super must happen after, so that downstream can use maybe_get_output
4994 // to retrieve the output
4995 at::meta::structured_lcm::set_output_raw_strided(output_idx, sizes, strides, options, names);
4996 }
4997 const Tensor& maybe_get_output(int64_t output_idx) override {
4998 return *outputs_[output_idx];
4999 }
5000 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5001 c10::OptionalDeviceGuard guard_;
5002};
5003at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_lcm(const at::Tensor & self, const at::Tensor & other) {
5004structured_lcm_default_backend_functional op;
5005op.meta(self, other);
5006at::lcm_outf(self, other, *op.outputs_[0]);
5007return std::move(op.outputs_[0]).take();
5008}
5009struct structured_lcm_default_backend_inplace final : public at::meta::structured_lcm {
5010 structured_lcm_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5011 void set_output_strided(
5012 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5013 TensorOptions options, DimnameList names
5014 ) override {
5015 auto current_device = guard_.current_device();
5016 if (C10_UNLIKELY(current_device.has_value())) {
5017 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5018 "structured kernels don't support multi-device outputs");
5019 } else {
5020 guard_.reset_device(options.device());
5021 }
5022 const auto& out = outputs_[output_idx].get();
5023 check_inplace(out, sizes, options);
5024 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5025 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5026 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5027 }
5028 if (!names.empty()) {
5029 namedinference::propagate_names(outputs_[output_idx], names);
5030 }
5031 // super must happen after, so that downstream can use maybe_get_output
5032 // to retrieve the output
5033 at::meta::structured_lcm::set_output_raw_strided(output_idx, sizes, strides, options, names);
5034 }
5035 void set_output_raw_strided(
5036 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5037 TensorOptions options, DimnameList names
5038 ) override {
5039 auto current_device = guard_.current_device();
5040 if (C10_UNLIKELY(current_device.has_value())) {
5041 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5042 "structured kernels don't support multi-device outputs");
5043 } else {
5044 guard_.reset_device(options.device());
5045 }
5046 const auto& out = outputs_[output_idx].get();
5047 check_inplace(out, sizes, options);
5048 if (!names.empty()) {
5049 namedinference::propagate_names(outputs_[output_idx], names);
5050 }
5051 // super must happen after, so that downstream can use maybe_get_output
5052 // to retrieve the output
5053 at::meta::structured_lcm::set_output_raw_strided(output_idx, sizes, strides, options, names);
5054 }
5055 const Tensor& maybe_get_output(int64_t output_idx) override {
5056 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5057 }
5058 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5059 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5060 c10::OptionalDeviceGuard guard_;
5061};
5062at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_lcm_(at::Tensor & self, const at::Tensor & other) {
5063structured_lcm_default_backend_inplace op(self);
5064op.meta(self, other);
5065at::lcm_outf(self, other, op.outputs_[0]);
5066if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5067return self;
5068}
5069struct structured_index_Tensor_default_backend_functional final : public at::meta::structured_index_Tensor {
5070 void set_output_strided(
5071 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5072 TensorOptions options, DimnameList names
5073 ) override {
5074 auto current_device = guard_.current_device();
5075 if (C10_UNLIKELY(current_device.has_value())) {
5076 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5077 "structured kernels don't support multi-device outputs");
5078 } else {
5079 guard_.reset_device(options.device());
5080 }
5081 outputs_[output_idx] = create_out(sizes, strides, options);
5082 if (!names.empty()) {
5083 namedinference::propagate_names(*outputs_[output_idx], names);
5084 }
5085 // super must happen after, so that downstream can use maybe_get_output
5086 // to retrieve the output
5087 at::meta::structured_index_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
5088 }
5089 void set_output_raw_strided(
5090 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5091 TensorOptions options, DimnameList names
5092 ) override {
5093 auto current_device = guard_.current_device();
5094 if (C10_UNLIKELY(current_device.has_value())) {
5095 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5096 "structured kernels don't support multi-device outputs");
5097 } else {
5098 guard_.reset_device(options.device());
5099 }
5100 outputs_[output_idx] = create_out(sizes, strides, options);
5101 if (!names.empty()) {
5102 namedinference::propagate_names(*outputs_[output_idx], names);
5103 }
5104 // super must happen after, so that downstream can use maybe_get_output
5105 // to retrieve the output
5106 at::meta::structured_index_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
5107 }
5108 const Tensor& maybe_get_output(int64_t output_idx) override {
5109 return *outputs_[output_idx];
5110 }
5111 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5112 c10::OptionalDeviceGuard guard_;
5113};
5114at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_index_Tensor(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
5115structured_index_Tensor_default_backend_functional op;
5116auto precompute = op.meta(self, at::IOptTensorListRef(indices));
5117(void)precompute;
5118at::index_outf(self, indices, *op.outputs_[0]);
5119return std::move(op.outputs_[0]).take();
5120}
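// NOTE: index.Tensor is the first op here whose meta() returns a precompute
// struct; this wrapper discards it ((void)precompute) because the out= overload
// it then calls re-runs the same meta logic for its own output. Also note the
// argument conversion: the public schema takes c10::List<c10::optional<Tensor>>
// while the meta function is declared against at::IOptTensorListRef, hence the
// explicit wrap. Sketch of a call that would land here (hypothetical names):
//
//   at::Tensor t = at::rand({3, 4});
//   at::Tensor rows = at::arange(2);                  // index tensor {0, 1}
//   c10::List<c10::optional<at::Tensor>> idx;
//   idx.push_back(c10::optional<at::Tensor>(rows));
//   at::Tensor r = at::index(t, idx);                 // rows 0 and 1 of t, shape {2, 4}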
5121struct structured_index_copy_default_backend_functional final : public at::meta::structured_index_copy {
5122 void set_output_strided(
5123 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5124 TensorOptions options, DimnameList names
5125 ) override {
5126 auto current_device = guard_.current_device();
5127 if (C10_UNLIKELY(current_device.has_value())) {
5128 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5129 "structured kernels don't support multi-device outputs");
5130 } else {
5131 guard_.reset_device(options.device());
5132 }
5133 outputs_[output_idx] = create_out(sizes, strides, options);
5134 if (!names.empty()) {
5135 namedinference::propagate_names(*outputs_[output_idx], names);
5136 }
5137 // super must happen after, so that downstream can use maybe_get_output
5138 // to retrieve the output
5139 }
5140 void set_output_raw_strided(
5141 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5142 TensorOptions options, DimnameList names
5143 ) override {
5144 auto current_device = guard_.current_device();
5145 if (C10_UNLIKELY(current_device.has_value())) {
5146 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5147 "structured kernels don't support multi-device outputs");
5148 } else {
5149 guard_.reset_device(options.device());
5150 }
5151 outputs_[output_idx] = create_out(sizes, strides, options);
5152 if (!names.empty()) {
5153 namedinference::propagate_names(*outputs_[output_idx], names);
5154 }
5155 // super must happen after, so that downstream can use maybe_get_output
5156 // to retrieve the output
5157 }
5158 const Tensor& maybe_get_output(int64_t output_idx) override {
5159 return *outputs_[output_idx];
5160 }
5161 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5162 c10::OptionalDeviceGuard guard_;
5163};
5164at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
5165structured_index_copy_default_backend_functional op;
5166auto precompute = op.meta(self, dim, index, source);
5167(void)precompute;
5168at::index_copy_outf(self, precompute.dim, index, source, *op.outputs_[0]);
5169return std::move(op.outputs_[0]).take();
5170}
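// NOTE: index_copy's meta() also returns a precompute struct, and here one field
// is actually consumed: precompute.dim is the dimension after wrapping and
// validation (so a negative dim has already been normalized), and it is what the
// out= call receives instead of the raw dim argument. Sketch (hypothetical names):
//
//   at::Tensor t = at::zeros({3, 2});
//   at::Tensor src = at::ones({2, 2});
//   at::Tensor idx = at::arange(2);                   // rows 0 and 1
//   at::Tensor r = at::index_copy(t, 0, idx, src);    // dim = -2 would wrap to 0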
5171struct structured_index_copy_default_backend_inplace final : public at::meta::structured_index_copy {
5172 structured_index_copy_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5173 void set_output_strided(
5174 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5175 TensorOptions options, DimnameList names
5176 ) override {
5177 auto current_device = guard_.current_device();
5178 if (C10_UNLIKELY(current_device.has_value())) {
5179 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5180 "structured kernels don't support multi-device outputs");
5181 } else {
5182 guard_.reset_device(options.device());
5183 }
5184 const auto& out = outputs_[output_idx].get();
5185 check_inplace(out, sizes, options);
5186 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5187 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5188 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5189 }
5190 if (!names.empty()) {
5191 namedinference::propagate_names(outputs_[output_idx], names);
5192 }
5193 // super must happen after, so that downstream can use maybe_get_output
5194 // to retrieve the output
5195 }
5196 void set_output_raw_strided(
5197 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5198 TensorOptions options, DimnameList names
5199 ) override {
5200 auto current_device = guard_.current_device();
5201 if (C10_UNLIKELY(current_device.has_value())) {
5202 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5203 "structured kernels don't support multi-device outputs");
5204 } else {
5205 guard_.reset_device(options.device());
5206 }
5207 const auto& out = outputs_[output_idx].get();
5208 check_inplace(out, sizes, options);
5209 if (!names.empty()) {
5210 namedinference::propagate_names(outputs_[output_idx], names);
5211 }
5212 // super must happen after, so that downstream can use maybe_get_output
5213 // to retrieve the output
5214 }
5215 const Tensor& maybe_get_output(int64_t output_idx) override {
5216 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5217 }
5218 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5219 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5220 c10::OptionalDeviceGuard guard_;
5221};
5222at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
5223structured_index_copy_default_backend_inplace op(self);
5224auto precompute = op.meta(self, dim, index, source);
5225(void)precompute;
5226at::index_copy_outf(self, precompute.dim, index, source, op.outputs_[0]);
5227if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5228return self;
5229}
5230struct structured_isin_Tensor_Tensor_default_backend_functional final : public at::meta::structured_isin_Tensor_Tensor {
5231 void set_output_strided(
5232 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5233 TensorOptions options, DimnameList names
5234 ) override {
5235 auto current_device = guard_.current_device();
5236 if (C10_UNLIKELY(current_device.has_value())) {
5237 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5238 "structured kernels don't support multi-device outputs");
5239 } else {
5240 guard_.reset_device(options.device());
5241 }
5242 outputs_[output_idx] = create_out(sizes, strides, options);
5243 if (!names.empty()) {
5244 namedinference::propagate_names(*outputs_[output_idx], names);
5245 }
5246 // super must happen after, so that downstream can use maybe_get_output
5247 // to retrieve the output
5248 }
5249 void set_output_raw_strided(
5250 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5251 TensorOptions options, DimnameList names
5252 ) override {
5253 auto current_device = guard_.current_device();
5254 if (C10_UNLIKELY(current_device.has_value())) {
5255 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5256 "structured kernels don't support multi-device outputs");
5257 } else {
5258 guard_.reset_device(options.device());
5259 }
5260 outputs_[output_idx] = create_out(sizes, strides, options);
5261 if (!names.empty()) {
5262 namedinference::propagate_names(*outputs_[output_idx], names);
5263 }
5264 // super must happen after, so that downstream can use maybe_get_output
5265 // to retrieve the output
5266 }
5267 const Tensor& maybe_get_output(int64_t output_idx) override {
5268 return *outputs_[output_idx];
5269 }
5270 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5271 c10::OptionalDeviceGuard guard_;
5272};
5273at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_isin_Tensor_Tensor(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
5274structured_isin_Tensor_Tensor_default_backend_functional op;
5275op.meta(elements, test_elements, assume_unique, invert);
5276at::isin_outf(elements, test_elements, assume_unique, invert, *op.outputs_[0]);
5277return std::move(op.outputs_[0]).take();
5278}
5279struct structured_isin_Tensor_Scalar_default_backend_functional final : public at::meta::structured_isin_Tensor_Scalar {
5280 void set_output_strided(
5281 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5282 TensorOptions options, DimnameList names
5283 ) override {
5284 auto current_device = guard_.current_device();
5285 if (C10_UNLIKELY(current_device.has_value())) {
5286 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5287 "structured kernels don't support multi-device outputs");
5288 } else {
5289 guard_.reset_device(options.device());
5290 }
5291 outputs_[output_idx] = create_out(sizes, strides, options);
5292 if (!names.empty()) {
5293 namedinference::propagate_names(*outputs_[output_idx], names);
5294 }
5295 // super must happen after, so that downstream can use maybe_get_output
5296 // to retrieve the output
5297 }
5298 void set_output_raw_strided(
5299 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5300 TensorOptions options, DimnameList names
5301 ) override {
5302 auto current_device = guard_.current_device();
5303 if (C10_UNLIKELY(current_device.has_value())) {
5304 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5305 "structured kernels don't support multi-device outputs");
5306 } else {
5307 guard_.reset_device(options.device());
5308 }
5309 outputs_[output_idx] = create_out(sizes, strides, options);
5310 if (!names.empty()) {
5311 namedinference::propagate_names(*outputs_[output_idx], names);
5312 }
5313 // super must happen after, so that downstream can use maybe_get_output
5314 // to retrieve the output
5315 }
5316 const Tensor& maybe_get_output(int64_t output_idx) override {
5317 return *outputs_[output_idx];
5318 }
5319 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5320 c10::OptionalDeviceGuard guard_;
5321};
5322at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_isin_Tensor_Scalar(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
5323structured_isin_Tensor_Scalar_default_backend_functional op;
5324op.meta(elements, test_element, assume_unique, invert);
5325at::isin_outf(elements, test_element, assume_unique, invert, *op.outputs_[0]);
5326return std::move(op.outputs_[0]).take();
5327}
5328struct structured_isin_Scalar_Tensor_default_backend_functional final : public at::meta::structured_isin_Scalar_Tensor {
5329 void set_output_strided(
5330 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5331 TensorOptions options, DimnameList names
5332 ) override {
5333 auto current_device = guard_.current_device();
5334 if (C10_UNLIKELY(current_device.has_value())) {
5335 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5336 "structured kernels don't support multi-device outputs");
5337 } else {
5338 guard_.reset_device(options.device());
5339 }
5340 outputs_[output_idx] = create_out(sizes, strides, options);
5341 if (!names.empty()) {
5342 namedinference::propagate_names(*outputs_[output_idx], names);
5343 }
5344 // super must happen after, so that downstream can use maybe_get_output
5345 // to retrieve the output
5346 }
5347 void set_output_raw_strided(
5348 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5349 TensorOptions options, DimnameList names
5350 ) override {
5351 auto current_device = guard_.current_device();
5352 if (C10_UNLIKELY(current_device.has_value())) {
5353 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5354 "structured kernels don't support multi-device outputs");
5355 } else {
5356 guard_.reset_device(options.device());
5357 }
5358 outputs_[output_idx] = create_out(sizes, strides, options);
5359 if (!names.empty()) {
5360 namedinference::propagate_names(*outputs_[output_idx], names);
5361 }
5362 // super must happen after, so that downstream can use maybe_get_output
5363 // to retrieve the output
5364 }
5365 const Tensor& maybe_get_output(int64_t output_idx) override {
5366 return *outputs_[output_idx];
5367 }
5368 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5369 c10::OptionalDeviceGuard guard_;
5370};
5371at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_isin_Scalar_Tensor(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
5372structured_isin_Scalar_Tensor_default_backend_functional op;
5373op.meta(element, test_elements, assume_unique, invert);
5374at::isin_outf(element, test_elements, assume_unique, invert, *op.outputs_[0]);
5375return std::move(op.outputs_[0]).take();
5376}
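// NOTE: the three isin wrappers above cover the overload set (Tensor/Tensor,
// Tensor/Scalar, Scalar/Tensor); each is purely functional, takes the same
// assume_unique / invert flags, and produces a boolean tensor shaped like the
// elements argument (a 0-dim tensor for the Scalar_Tensor overload). Sketch
// (hypothetical names):
//
//   at::Tensor e = at::arange(5);                     // {0,1,2,3,4}
//   at::Tensor m = at::isin(e, at::arange(2, 8));     // {false,false,true,true,true}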
5377struct structured_log_default_backend_functional final : public at::meta::structured_log {
5378 void set_output_strided(
5379 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5380 TensorOptions options, DimnameList names
5381 ) override {
5382 auto current_device = guard_.current_device();
5383 if (C10_UNLIKELY(current_device.has_value())) {
5384 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5385 "structured kernels don't support multi-device outputs");
5386 } else {
5387 guard_.reset_device(options.device());
5388 }
5389 outputs_[output_idx] = create_out(sizes, strides, options);
5390 if (!names.empty()) {
5391 namedinference::propagate_names(*outputs_[output_idx], names);
5392 }
5393 // super must happen after, so that downstream can use maybe_get_output
5394 // to retrieve the output
5395 at::meta::structured_log::set_output_raw_strided(output_idx, sizes, strides, options, names);
5396 }
5397 void set_output_raw_strided(
5398 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5399 TensorOptions options, DimnameList names
5400 ) override {
5401 auto current_device = guard_.current_device();
5402 if (C10_UNLIKELY(current_device.has_value())) {
5403 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5404 "structured kernels don't support multi-device outputs");
5405 } else {
5406 guard_.reset_device(options.device());
5407 }
5408 outputs_[output_idx] = create_out(sizes, strides, options);
5409 if (!names.empty()) {
5410 namedinference::propagate_names(*outputs_[output_idx], names);
5411 }
5412 // super must happen after, so that downstream can use maybe_get_output
5413 // to retrieve the output
5414 at::meta::structured_log::set_output_raw_strided(output_idx, sizes, strides, options, names);
5415 }
5416 const Tensor& maybe_get_output(int64_t output_idx) override {
5417 return *outputs_[output_idx];
5418 }
5419 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5420 c10::OptionalDeviceGuard guard_;
5421};
5422at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_log(const at::Tensor & self) {
5423structured_log_default_backend_functional op;
5424op.meta(self);
5425at::log_outf(self, *op.outputs_[0]);
5426return std::move(op.outputs_[0]).take();
5427}
5428struct structured_log_default_backend_inplace final : public at::meta::structured_log {
5429 structured_log_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5430 void set_output_strided(
5431 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5432 TensorOptions options, DimnameList names
5433 ) override {
5434 auto current_device = guard_.current_device();
5435 if (C10_UNLIKELY(current_device.has_value())) {
5436 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5437 "structured kernels don't support multi-device outputs");
5438 } else {
5439 guard_.reset_device(options.device());
5440 }
5441 const auto& out = outputs_[output_idx].get();
5442 check_inplace(out, sizes, options);
5443 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5444 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5445 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5446 }
5447 if (!names.empty()) {
5448 namedinference::propagate_names(outputs_[output_idx], names);
5449 }
5450 // super must happen after, so that downstream can use maybe_get_output
5451 // to retrieve the output
5452 at::meta::structured_log::set_output_raw_strided(output_idx, sizes, strides, options, names);
5453 }
5454 void set_output_raw_strided(
5455 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5456 TensorOptions options, DimnameList names
5457 ) override {
5458 auto current_device = guard_.current_device();
5459 if (C10_UNLIKELY(current_device.has_value())) {
5460 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5461 "structured kernels don't support multi-device outputs");
5462 } else {
5463 guard_.reset_device(options.device());
5464 }
5465 const auto& out = outputs_[output_idx].get();
5466 check_inplace(out, sizes, options);
5467 if (!names.empty()) {
5468 namedinference::propagate_names(outputs_[output_idx], names);
5469 }
5470 // super must happen after, so that downstream can use maybe_get_output
5471 // to retrieve the output
5472 at::meta::structured_log::set_output_raw_strided(output_idx, sizes, strides, options, names);
5473 }
5474 const Tensor& maybe_get_output(int64_t output_idx) override {
5475 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5476 }
5477 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5478 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5479 c10::OptionalDeviceGuard guard_;
5480};
5481at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_log_(at::Tensor & self) {
5482structured_log_default_backend_inplace op(self);
5483op.meta(self);
5484at::log_outf(self, op.outputs_[0]);
5485if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5486return self;
5487}
5488struct structured_log10_default_backend_functional final : public at::meta::structured_log10 {
5489 void set_output_strided(
5490 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5491 TensorOptions options, DimnameList names
5492 ) override {
5493 auto current_device = guard_.current_device();
5494 if (C10_UNLIKELY(current_device.has_value())) {
5495 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5496 "structured kernels don't support multi-device outputs");
5497 } else {
5498 guard_.reset_device(options.device());
5499 }
5500 outputs_[output_idx] = create_out(sizes, strides, options);
5501 if (!names.empty()) {
5502 namedinference::propagate_names(*outputs_[output_idx], names);
5503 }
5504 // super must happen after, so that downstream can use maybe_get_output
5505 // to retrieve the output
5506 at::meta::structured_log10::set_output_raw_strided(output_idx, sizes, strides, options, names);
5507 }
5508 void set_output_raw_strided(
5509 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5510 TensorOptions options, DimnameList names
5511 ) override {
5512 auto current_device = guard_.current_device();
5513 if (C10_UNLIKELY(current_device.has_value())) {
5514 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5515 "structured kernels don't support multi-device outputs");
5516 } else {
5517 guard_.reset_device(options.device());
5518 }
5519 outputs_[output_idx] = create_out(sizes, strides, options);
5520 if (!names.empty()) {
5521 namedinference::propagate_names(*outputs_[output_idx], names);
5522 }
5523 // super must happen after, so that downstream can use maybe_get_output
5524 // to retrieve the output
5525 at::meta::structured_log10::set_output_raw_strided(output_idx, sizes, strides, options, names);
5526 }
5527 const Tensor& maybe_get_output(int64_t output_idx) override {
5528 return *outputs_[output_idx];
5529 }
5530 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5531 c10::OptionalDeviceGuard guard_;
5532};
5533at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_log10(const at::Tensor & self) {
5534structured_log10_default_backend_functional op;
5535op.meta(self);
5536at::log10_outf(self, *op.outputs_[0]);
5537return std::move(op.outputs_[0]).take();
5538}
5539struct structured_log10_default_backend_inplace final : public at::meta::structured_log10 {
5540 structured_log10_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5541 void set_output_strided(
5542 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5543 TensorOptions options, DimnameList names
5544 ) override {
5545 auto current_device = guard_.current_device();
5546 if (C10_UNLIKELY(current_device.has_value())) {
5547 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5548 "structured kernels don't support multi-device outputs");
5549 } else {
5550 guard_.reset_device(options.device());
5551 }
5552 const auto& out = outputs_[output_idx].get();
5553 check_inplace(out, sizes, options);
5554 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5555 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5556 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5557 }
5558 if (!names.empty()) {
5559 namedinference::propagate_names(outputs_[output_idx], names);
5560 }
5561 // super must happen after, so that downstream can use maybe_get_output
5562 // to retrieve the output
5563 at::meta::structured_log10::set_output_raw_strided(output_idx, sizes, strides, options, names);
5564 }
5565 void set_output_raw_strided(
5566 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5567 TensorOptions options, DimnameList names
5568 ) override {
5569 auto current_device = guard_.current_device();
5570 if (C10_UNLIKELY(current_device.has_value())) {
5571 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5572 "structured kernels don't support multi-device outputs");
5573 } else {
5574 guard_.reset_device(options.device());
5575 }
5576 const auto& out = outputs_[output_idx].get();
5577 check_inplace(out, sizes, options);
5578 if (!names.empty()) {
5579 namedinference::propagate_names(outputs_[output_idx], names);
5580 }
5581 // super must happen after, so that downstream can use maybe_get_output
5582 // to retrieve the output
5583 at::meta::structured_log10::set_output_raw_strided(output_idx, sizes, strides, options, names);
5584 }
5585 const Tensor& maybe_get_output(int64_t output_idx) override {
5586 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5587 }
5588 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5589 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5590 c10::OptionalDeviceGuard guard_;
5591};
5592at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_log10_(at::Tensor & self) {
5593structured_log10_default_backend_inplace op(self);
5594op.meta(self);
5595at::log10_outf(self, op.outputs_[0]);
5596if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5597return self;
5598}
5599struct structured_log1p_default_backend_functional final : public at::meta::structured_log1p {
5600 void set_output_strided(
5601 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5602 TensorOptions options, DimnameList names
5603 ) override {
5604 auto current_device = guard_.current_device();
5605 if (C10_UNLIKELY(current_device.has_value())) {
5606 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5607 "structured kernels don't support multi-device outputs");
5608 } else {
5609 guard_.reset_device(options.device());
5610 }
5611 outputs_[output_idx] = create_out(sizes, strides, options);
5612 if (!names.empty()) {
5613 namedinference::propagate_names(*outputs_[output_idx], names);
5614 }
5615 // super must happen after, so that downstream can use maybe_get_output
5616 // to retrieve the output
5617 at::meta::structured_log1p::set_output_raw_strided(output_idx, sizes, strides, options, names);
5618 }
5619 void set_output_raw_strided(
5620 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5621 TensorOptions options, DimnameList names
5622 ) override {
5623 auto current_device = guard_.current_device();
5624 if (C10_UNLIKELY(current_device.has_value())) {
5625 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5626 "structured kernels don't support multi-device outputs");
5627 } else {
5628 guard_.reset_device(options.device());
5629 }
5630 outputs_[output_idx] = create_out(sizes, strides, options);
5631 if (!names.empty()) {
5632 namedinference::propagate_names(*outputs_[output_idx], names);
5633 }
5634 // super must happen after, so that downstream can use maybe_get_output
5635 // to retrieve the output
5636 at::meta::structured_log1p::set_output_raw_strided(output_idx, sizes, strides, options, names);
5637 }
5638 const Tensor& maybe_get_output(int64_t output_idx) override {
5639 return *outputs_[output_idx];
5640 }
5641 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5642 c10::OptionalDeviceGuard guard_;
5643};
5644at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_log1p(const at::Tensor & self) {
5645structured_log1p_default_backend_functional op;
5646op.meta(self);
5647at::log1p_outf(self, *op.outputs_[0]);
5648return std::move(op.outputs_[0]).take();
5649}
5650struct structured_log1p_default_backend_inplace final : public at::meta::structured_log1p {
5651 structured_log1p_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5652 void set_output_strided(
5653 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5654 TensorOptions options, DimnameList names
5655 ) override {
5656 auto current_device = guard_.current_device();
5657 if (C10_UNLIKELY(current_device.has_value())) {
5658 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5659 "structured kernels don't support multi-device outputs");
5660 } else {
5661 guard_.reset_device(options.device());
5662 }
5663 const auto& out = outputs_[output_idx].get();
5664 check_inplace(out, sizes, options);
5665 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5666 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5667 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5668 }
5669 if (!names.empty()) {
5670 namedinference::propagate_names(outputs_[output_idx], names);
5671 }
5672 // super must happen after, so that downstream can use maybe_get_output
5673 // to retrieve the output
5674 at::meta::structured_log1p::set_output_raw_strided(output_idx, sizes, strides, options, names);
5675 }
5676 void set_output_raw_strided(
5677 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5678 TensorOptions options, DimnameList names
5679 ) override {
5680 auto current_device = guard_.current_device();
5681 if (C10_UNLIKELY(current_device.has_value())) {
5682 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5683 "structured kernels don't support multi-device outputs");
5684 } else {
5685 guard_.reset_device(options.device());
5686 }
5687 const auto& out = outputs_[output_idx].get();
5688 check_inplace(out, sizes, options);
5689 if (!names.empty()) {
5690 namedinference::propagate_names(outputs_[output_idx], names);
5691 }
5692 // super must happen after, so that downstream can use maybe_get_output
5693 // to retrieve the output
5694 at::meta::structured_log1p::set_output_raw_strided(output_idx, sizes, strides, options, names);
5695 }
5696 const Tensor& maybe_get_output(int64_t output_idx) override {
5697 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5698 }
5699 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5700 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5701 c10::OptionalDeviceGuard guard_;
5702};
5703at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_log1p_(at::Tensor & self) {
5704structured_log1p_default_backend_inplace op(self);
5705op.meta(self);
5706at::log1p_outf(self, op.outputs_[0]);
5707if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5708return self;
5709}
5710struct structured_log2_default_backend_functional final : public at::meta::structured_log2 {
5711 void set_output_strided(
5712 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5713 TensorOptions options, DimnameList names
5714 ) override {
5715 auto current_device = guard_.current_device();
5716 if (C10_UNLIKELY(current_device.has_value())) {
5717 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5718 "structured kernels don't support multi-device outputs");
5719 } else {
5720 guard_.reset_device(options.device());
5721 }
5722 outputs_[output_idx] = create_out(sizes, strides, options);
5723 if (!names.empty()) {
5724 namedinference::propagate_names(*outputs_[output_idx], names);
5725 }
5726 // super must happen after, so that downstream can use maybe_get_output
5727 // to retrieve the output
5728 at::meta::structured_log2::set_output_raw_strided(output_idx, sizes, strides, options, names);
5729 }
5730 void set_output_raw_strided(
5731 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5732 TensorOptions options, DimnameList names
5733 ) override {
5734 auto current_device = guard_.current_device();
5735 if (C10_UNLIKELY(current_device.has_value())) {
5736 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5737 "structured kernels don't support multi-device outputs");
5738 } else {
5739 guard_.reset_device(options.device());
5740 }
5741 outputs_[output_idx] = create_out(sizes, strides, options);
5742 if (!names.empty()) {
5743 namedinference::propagate_names(*outputs_[output_idx], names);
5744 }
5745 // super must happen after, so that downstream can use maybe_get_output
5746 // to retrieve the output
5747 at::meta::structured_log2::set_output_raw_strided(output_idx, sizes, strides, options, names);
5748 }
5749 const Tensor& maybe_get_output(int64_t output_idx) override {
5750 return *outputs_[output_idx];
5751 }
5752 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5753 c10::OptionalDeviceGuard guard_;
5754};
5755at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_log2(const at::Tensor & self) {
5756structured_log2_default_backend_functional op;
5757op.meta(self);
5758at::log2_outf(self, *op.outputs_[0]);
5759return std::move(op.outputs_[0]).take();
5760}
5761struct structured_log2_default_backend_inplace final : public at::meta::structured_log2 {
5762 structured_log2_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5763 void set_output_strided(
5764 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5765 TensorOptions options, DimnameList names
5766 ) override {
5767 auto current_device = guard_.current_device();
5768 if (C10_UNLIKELY(current_device.has_value())) {
5769 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5770 "structured kernels don't support multi-device outputs");
5771 } else {
5772 guard_.reset_device(options.device());
5773 }
5774 const auto& out = outputs_[output_idx].get();
5775 check_inplace(out, sizes, options);
5776 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5777 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5778 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5779 }
5780 if (!names.empty()) {
5781 namedinference::propagate_names(outputs_[output_idx], names);
5782 }
5783 // super must happen after, so that downstream can use maybe_get_output
5784 // to retrieve the output
5785 at::meta::structured_log2::set_output_raw_strided(output_idx, sizes, strides, options, names);
5786 }
5787 void set_output_raw_strided(
5788 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5789 TensorOptions options, DimnameList names
5790 ) override {
5791 auto current_device = guard_.current_device();
5792 if (C10_UNLIKELY(current_device.has_value())) {
5793 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5794 "structured kernels don't support multi-device outputs");
5795 } else {
5796 guard_.reset_device(options.device());
5797 }
5798 const auto& out = outputs_[output_idx].get();
5799 check_inplace(out, sizes, options);
5800 if (!names.empty()) {
5801 namedinference::propagate_names(outputs_[output_idx], names);
5802 }
5803 // super must happen after, so that downstream can use maybe_get_output
5804 // to retrieve the output
5805 at::meta::structured_log2::set_output_raw_strided(output_idx, sizes, strides, options, names);
5806 }
5807 const Tensor& maybe_get_output(int64_t output_idx) override {
5808 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5809 }
5810 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5811 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5812 c10::OptionalDeviceGuard guard_;
5813};
5814at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_log2_(at::Tensor & self) {
5815structured_log2_default_backend_inplace op(self);
5816op.meta(self);
5817at::log2_outf(self, op.outputs_[0]);
5818if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5819return self;
5820}
5821struct structured_logaddexp_default_backend_functional final : public at::meta::structured_logaddexp {
5822 void set_output_strided(
5823 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5824 TensorOptions options, DimnameList names
5825 ) override {
5826 auto current_device = guard_.current_device();
5827 if (C10_UNLIKELY(current_device.has_value())) {
5828 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5829 "structured kernels don't support multi-device outputs");
5830 } else {
5831 guard_.reset_device(options.device());
5832 }
5833 outputs_[output_idx] = create_out(sizes, strides, options);
5834 if (!names.empty()) {
5835 namedinference::propagate_names(*outputs_[output_idx], names);
5836 }
5837 // super must happen after, so that downstream can use maybe_get_output
5838 // to retrieve the output
5839 at::meta::structured_logaddexp::set_output_raw_strided(output_idx, sizes, strides, options, names);
5840 }
5841 void set_output_raw_strided(
5842 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5843 TensorOptions options, DimnameList names
5844 ) override {
5845 auto current_device = guard_.current_device();
5846 if (C10_UNLIKELY(current_device.has_value())) {
5847 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5848 "structured kernels don't support multi-device outputs");
5849 } else {
5850 guard_.reset_device(options.device());
5851 }
5852 outputs_[output_idx] = create_out(sizes, strides, options);
5853 if (!names.empty()) {
5854 namedinference::propagate_names(*outputs_[output_idx], names);
5855 }
5856 // super must happen after, so that downstream can use maybe_get_output
5857 // to retrieve the output
5858 at::meta::structured_logaddexp::set_output_raw_strided(output_idx, sizes, strides, options, names);
5859 }
5860 const Tensor& maybe_get_output(int64_t output_idx) override {
5861 return *outputs_[output_idx];
5862 }
5863 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5864 c10::OptionalDeviceGuard guard_;
5865};
5866at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_logaddexp(const at::Tensor & self, const at::Tensor & other) {
5867structured_logaddexp_default_backend_functional op;
5868op.meta(self, other);
5869at::logaddexp_outf(self, other, *op.outputs_[0]);
5870return std::move(op.outputs_[0]).take();
5871}
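// NOTE: logaddexp computes log(exp(self) + exp(other)) elementwise in a
// numerically stable way (logaddexp2 below is the base-2 analogue); only the
// functional wrappers appear for these ops in this block. Sketch (hypothetical
// names, showing why the stable formulation matters):
//
//   at::Tensor a = at::full({3}, -1000.0);
//   at::Tensor b = at::full({3}, -1001.0);
//   at::Tensor c = at::logaddexp(a, b);   // ~ -999.69, instead of -inf from
//                                         // exp() underflowing to zero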
5872struct structured_logaddexp2_default_backend_functional final : public at::meta::structured_logaddexp2 {
5873 void set_output_strided(
5874 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5875 TensorOptions options, DimnameList names
5876 ) override {
5877 auto current_device = guard_.current_device();
5878 if (C10_UNLIKELY(current_device.has_value())) {
5879 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5880 "structured kernels don't support multi-device outputs");
5881 } else {
5882 guard_.reset_device(options.device());
5883 }
5884 outputs_[output_idx] = create_out(sizes, strides, options);
5885 if (!names.empty()) {
5886 namedinference::propagate_names(*outputs_[output_idx], names);
5887 }
5888 // super must happen after, so that downstream can use maybe_get_output
5889 // to retrieve the output
5890 at::meta::structured_logaddexp2::set_output_raw_strided(output_idx, sizes, strides, options, names);
5891 }
5892 void set_output_raw_strided(
5893 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5894 TensorOptions options, DimnameList names
5895 ) override {
5896 auto current_device = guard_.current_device();
5897 if (C10_UNLIKELY(current_device.has_value())) {
5898 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5899 "structured kernels don't support multi-device outputs");
5900 } else {
5901 guard_.reset_device(options.device());
5902 }
5903 outputs_[output_idx] = create_out(sizes, strides, options);
5904 if (!names.empty()) {
5905 namedinference::propagate_names(*outputs_[output_idx], names);
5906 }
5907 // super must happen after, so that downstream can use maybe_get_output
5908 // to retrieve the output
5909 at::meta::structured_logaddexp2::set_output_raw_strided(output_idx, sizes, strides, options, names);
5910 }
5911 const Tensor& maybe_get_output(int64_t output_idx) override {
5912 return *outputs_[output_idx];
5913 }
5914 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5915 c10::OptionalDeviceGuard guard_;
5916};
5917at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_logaddexp2(const at::Tensor & self, const at::Tensor & other) {
5918structured_logaddexp2_default_backend_functional op;
5919op.meta(self, other);
5920at::logaddexp2_outf(self, other, *op.outputs_[0]);
5921return std::move(op.outputs_[0]).take();
5922}
5923struct structured_xlogy_Tensor_default_backend_functional final : public at::meta::structured_xlogy_Tensor {
5924 void set_output_strided(
5925 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5926 TensorOptions options, DimnameList names
5927 ) override {
5928 auto current_device = guard_.current_device();
5929 if (C10_UNLIKELY(current_device.has_value())) {
5930 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5931 "structured kernels don't support multi-device outputs");
5932 } else {
5933 guard_.reset_device(options.device());
5934 }
5935 outputs_[output_idx] = create_out(sizes, strides, options);
5936 if (!names.empty()) {
5937 namedinference::propagate_names(*outputs_[output_idx], names);
5938 }
5939 // super must happen after, so that downstream can use maybe_get_output
5940 // to retrieve the output
5941 at::meta::structured_xlogy_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
5942 }
5943 void set_output_raw_strided(
5944 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5945 TensorOptions options, DimnameList names
5946 ) override {
5947 auto current_device = guard_.current_device();
5948 if (C10_UNLIKELY(current_device.has_value())) {
5949 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5950 "structured kernels don't support multi-device outputs");
5951 } else {
5952 guard_.reset_device(options.device());
5953 }
5954 outputs_[output_idx] = create_out(sizes, strides, options);
5955 if (!names.empty()) {
5956 namedinference::propagate_names(*outputs_[output_idx], names);
5957 }
5958 // super must happen after, so that downstream can use maybe_get_output
5959 // to retrieve the output
5960 at::meta::structured_xlogy_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
5961 }
5962 const Tensor& maybe_get_output(int64_t output_idx) override {
5963 return *outputs_[output_idx];
5964 }
5965 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5966 c10::OptionalDeviceGuard guard_;
5967};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_xlogy_Tensor(const at::Tensor & self, const at::Tensor & other) {
  structured_xlogy_Tensor_default_backend_functional op;
  op.meta(self, other);
  at::xlogy_outf(self, other, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
5974struct structured_xlogy_Tensor_default_backend_inplace final : public at::meta::structured_xlogy_Tensor {
5975 structured_xlogy_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5976 void set_output_strided(
5977 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5978 TensorOptions options, DimnameList names
5979 ) override {
5980 auto current_device = guard_.current_device();
5981 if (C10_UNLIKELY(current_device.has_value())) {
5982 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
5983 "structured kernels don't support multi-device outputs");
5984 } else {
5985 guard_.reset_device(options.device());
5986 }
5987 const auto& out = outputs_[output_idx].get();
5988 check_inplace(out, sizes, options);
5989 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5990 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5991 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5992 }
5993 if (!names.empty()) {
5994 namedinference::propagate_names(outputs_[output_idx], names);
5995 }
5996 // super must happen after, so that downstream can use maybe_get_output
5997 // to retrieve the output
5998 at::meta::structured_xlogy_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
5999 }
6000 void set_output_raw_strided(
6001 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6002 TensorOptions options, DimnameList names
6003 ) override {
6004 auto current_device = guard_.current_device();
6005 if (C10_UNLIKELY(current_device.has_value())) {
6006 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6007 "structured kernels don't support multi-device outputs");
6008 } else {
6009 guard_.reset_device(options.device());
6010 }
6011 const auto& out = outputs_[output_idx].get();
6012 check_inplace(out, sizes, options);
6013 if (!names.empty()) {
6014 namedinference::propagate_names(outputs_[output_idx], names);
6015 }
6016 // super must happen after, so that downstream can use maybe_get_output
6017 // to retrieve the output
6018 at::meta::structured_xlogy_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
6019 }
6020 const Tensor& maybe_get_output(int64_t output_idx) override {
6021 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6022 }
6023 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6024 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6025 c10::OptionalDeviceGuard guard_;
6026};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_xlogy__Tensor(at::Tensor & self, const at::Tensor & other) {
  structured_xlogy_Tensor_default_backend_inplace op(self);
  op.meta(self, other);
  at::xlogy_outf(self, other, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
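// Note on the "inplace" wrappers (xlogy_, mul_, reciprocal_, neg_, ...):
// outputs_ holds a std::reference_wrapper to `self`, so no new tensor is
// allocated. check_inplace verifies that `self` is usable as the output; when
// maybe_create_proxy decides `self` cannot be written directly, the kernel
// writes into a temporary proxy tensor and the wrapper copies the proxy back
// into `self` with copy_() before returning it.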
6034struct structured__log_softmax_default_backend_functional final : public at::meta::structured__log_softmax {
6035 void set_output_strided(
6036 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6037 TensorOptions options, DimnameList names
6038 ) override {
6039 auto current_device = guard_.current_device();
6040 if (C10_UNLIKELY(current_device.has_value())) {
6041 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6042 "structured kernels don't support multi-device outputs");
6043 } else {
6044 guard_.reset_device(options.device());
6045 }
6046 outputs_[output_idx] = create_out(sizes, strides, options);
6047 if (!names.empty()) {
6048 namedinference::propagate_names(*outputs_[output_idx], names);
6049 }
6050 // super must happen after, so that downstream can use maybe_get_output
6051 // to retrieve the output
6052 }
6053 void set_output_raw_strided(
6054 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6055 TensorOptions options, DimnameList names
6056 ) override {
6057 auto current_device = guard_.current_device();
6058 if (C10_UNLIKELY(current_device.has_value())) {
6059 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6060 "structured kernels don't support multi-device outputs");
6061 } else {
6062 guard_.reset_device(options.device());
6063 }
6064 outputs_[output_idx] = create_out(sizes, strides, options);
6065 if (!names.empty()) {
6066 namedinference::propagate_names(*outputs_[output_idx], names);
6067 }
6068 // super must happen after, so that downstream can use maybe_get_output
6069 // to retrieve the output
6070 }
6071 const Tensor& maybe_get_output(int64_t output_idx) override {
6072 return *outputs_[output_idx];
6073 }
6074 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6075 c10::OptionalDeviceGuard guard_;
6076};
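// Unlike the elementwise structs above, this struct's set_output_* methods do
// not end with a call up to the meta class's set_output_raw_strided; the
// "super must happen after" comment is emitted by the code generator either
// way. Presumably the super call is only needed for meta classes that derive
// from TensorIteratorBase and must register their outputs with the iterator;
// the softmax- and reduction-style ops in this file manage that themselves.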
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
  structured__log_softmax_default_backend_functional op;
  op.meta(self, dim, half_to_float);
  at::_log_softmax_outf(self, dim, half_to_float, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6083struct structured__log_softmax_backward_data_default_backend_functional final : public at::meta::structured__log_softmax_backward_data {
6084 void set_output_strided(
6085 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6086 TensorOptions options, DimnameList names
6087 ) override {
6088 auto current_device = guard_.current_device();
6089 if (C10_UNLIKELY(current_device.has_value())) {
6090 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6091 "structured kernels don't support multi-device outputs");
6092 } else {
6093 guard_.reset_device(options.device());
6094 }
6095 outputs_[output_idx] = create_out(sizes, strides, options);
6096 if (!names.empty()) {
6097 namedinference::propagate_names(*outputs_[output_idx], names);
6098 }
6099 // super must happen after, so that downstream can use maybe_get_output
6100 // to retrieve the output
6101 }
6102 void set_output_raw_strided(
6103 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6104 TensorOptions options, DimnameList names
6105 ) override {
6106 auto current_device = guard_.current_device();
6107 if (C10_UNLIKELY(current_device.has_value())) {
6108 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6109 "structured kernels don't support multi-device outputs");
6110 } else {
6111 guard_.reset_device(options.device());
6112 }
6113 outputs_[output_idx] = create_out(sizes, strides, options);
6114 if (!names.empty()) {
6115 namedinference::propagate_names(*outputs_[output_idx], names);
6116 }
6117 // super must happen after, so that downstream can use maybe_get_output
6118 // to retrieve the output
6119 }
6120 const Tensor& maybe_get_output(int64_t output_idx) override {
6121 return *outputs_[output_idx];
6122 }
6123 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6124 c10::OptionalDeviceGuard guard_;
6125};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
  structured__log_softmax_backward_data_default_backend_functional op;
  op.meta(grad_output, output, dim, input_dtype);
  at::_log_softmax_backward_data_outf(grad_output, output, dim, input_dtype, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
namespace {
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_out_logsumexp_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logsumexp_out(self, dim, keepdim, out);
}
} // anonymous namespace
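// Ops like logsumexp.out here (and narrow_copy / pixel_shuffle /
// pixel_unshuffle further down) are not handled through structured-kernel
// wrapper structs in this file; their wrappers simply forward to an
// at::native composite implementation, with device checks and the DeviceGuard
// deliberately omitted, as the comments above note.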
6139struct structured_aminmax_default_backend_functional final : public at::meta::structured_aminmax {
6140 void set_output_strided(
6141 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6142 TensorOptions options, DimnameList names
6143 ) override {
6144 auto current_device = guard_.current_device();
6145 if (C10_UNLIKELY(current_device.has_value())) {
6146 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6147 "structured kernels don't support multi-device outputs");
6148 } else {
6149 guard_.reset_device(options.device());
6150 }
6151 outputs_[output_idx] = create_out(sizes, strides, options);
6152 if (!names.empty()) {
6153 namedinference::propagate_names(*outputs_[output_idx], names);
6154 }
6155 // super must happen after, so that downstream can use maybe_get_output
6156 // to retrieve the output
6157 }
6158 void set_output_raw_strided(
6159 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6160 TensorOptions options, DimnameList names
6161 ) override {
6162 auto current_device = guard_.current_device();
6163 if (C10_UNLIKELY(current_device.has_value())) {
6164 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6165 "structured kernels don't support multi-device outputs");
6166 } else {
6167 guard_.reset_device(options.device());
6168 }
6169 outputs_[output_idx] = create_out(sizes, strides, options);
6170 if (!names.empty()) {
6171 namedinference::propagate_names(*outputs_[output_idx], names);
6172 }
6173 // super must happen after, so that downstream can use maybe_get_output
6174 // to retrieve the output
6175 }
6176 const Tensor& maybe_get_output(int64_t output_idx) override {
6177 return *outputs_[output_idx];
6178 }
6179 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
6180 c10::OptionalDeviceGuard guard_;
6181};
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_aminmax(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
  structured_aminmax_default_backend_functional op;
  op.meta(self, dim, keepdim);
  at::aminmax_outf(self, dim, keepdim, *op.outputs_[0], *op.outputs_[1]);
  return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
}
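// Multi-output variant of the functional pattern: aminmax (and max.dim /
// min.dim below) size outputs_ at 2, pass both owned tensors to the out=
// kernel, and return them as a tuple by take()-ing each one.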
6188struct structured_max_dim_default_backend_functional final : public at::meta::structured_max_dim {
6189 void set_output_strided(
6190 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6191 TensorOptions options, DimnameList names
6192 ) override {
6193 auto current_device = guard_.current_device();
6194 if (C10_UNLIKELY(current_device.has_value())) {
6195 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6196 "structured kernels don't support multi-device outputs");
6197 } else {
6198 guard_.reset_device(options.device());
6199 }
6200 outputs_[output_idx] = create_out(sizes, strides, options);
6201 if (!names.empty()) {
6202 namedinference::propagate_names(*outputs_[output_idx], names);
6203 }
6204 // super must happen after, so that downstream can use maybe_get_output
6205 // to retrieve the output
6206 }
6207 void set_output_raw_strided(
6208 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6209 TensorOptions options, DimnameList names
6210 ) override {
6211 auto current_device = guard_.current_device();
6212 if (C10_UNLIKELY(current_device.has_value())) {
6213 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6214 "structured kernels don't support multi-device outputs");
6215 } else {
6216 guard_.reset_device(options.device());
6217 }
6218 outputs_[output_idx] = create_out(sizes, strides, options);
6219 if (!names.empty()) {
6220 namedinference::propagate_names(*outputs_[output_idx], names);
6221 }
6222 // super must happen after, so that downstream can use maybe_get_output
6223 // to retrieve the output
6224 }
6225 const Tensor& maybe_get_output(int64_t output_idx) override {
6226 return *outputs_[output_idx];
6227 }
6228 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
6229 c10::OptionalDeviceGuard guard_;
6230};
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_max_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
  structured_max_dim_default_backend_functional op;
  auto precompute = op.meta(self, dim, keepdim);
  (void)precompute;
  at::max_outf(self, precompute.dim, keepdim, *op.outputs_[0], *op.outputs_[1]);
  return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
}
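// When the meta function declares precomputed arguments, op.meta(...) returns
// them and the wrapper forwards them to the out= kernel; here precompute.dim
// is presumably the dimension already canonicalized (wrapped) by the meta
// function. The (void)precompute cast only exists to silence unused-variable
// warnings in wrappers that do not consume any precomputed field.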
6238struct structured_amax_default_backend_functional final : public at::meta::structured_amax {
6239 void set_output_strided(
6240 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6241 TensorOptions options, DimnameList names
6242 ) override {
6243 auto current_device = guard_.current_device();
6244 if (C10_UNLIKELY(current_device.has_value())) {
6245 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6246 "structured kernels don't support multi-device outputs");
6247 } else {
6248 guard_.reset_device(options.device());
6249 }
6250 outputs_[output_idx] = create_out(sizes, strides, options);
6251 if (!names.empty()) {
6252 namedinference::propagate_names(*outputs_[output_idx], names);
6253 }
6254 // super must happen after, so that downstream can use maybe_get_output
6255 // to retrieve the output
6256 }
6257 void set_output_raw_strided(
6258 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6259 TensorOptions options, DimnameList names
6260 ) override {
6261 auto current_device = guard_.current_device();
6262 if (C10_UNLIKELY(current_device.has_value())) {
6263 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6264 "structured kernels don't support multi-device outputs");
6265 } else {
6266 guard_.reset_device(options.device());
6267 }
6268 outputs_[output_idx] = create_out(sizes, strides, options);
6269 if (!names.empty()) {
6270 namedinference::propagate_names(*outputs_[output_idx], names);
6271 }
6272 // super must happen after, so that downstream can use maybe_get_output
6273 // to retrieve the output
6274 }
6275 const Tensor& maybe_get_output(int64_t output_idx) override {
6276 return *outputs_[output_idx];
6277 }
6278 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6279 c10::OptionalDeviceGuard guard_;
6280};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_amax(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  structured_amax_default_backend_functional op;
  op.meta(self, dim, keepdim);
  at::amax_outf(self, dim, keepdim, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6287struct structured_mean_dim_default_backend_functional final : public at::meta::structured_mean_dim {
6288 void set_output_strided(
6289 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6290 TensorOptions options, DimnameList names
6291 ) override {
6292 auto current_device = guard_.current_device();
6293 if (C10_UNLIKELY(current_device.has_value())) {
6294 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6295 "structured kernels don't support multi-device outputs");
6296 } else {
6297 guard_.reset_device(options.device());
6298 }
6299 outputs_[output_idx] = create_out(sizes, strides, options);
6300 if (!names.empty()) {
6301 namedinference::propagate_names(*outputs_[output_idx], names);
6302 }
6303 // super must happen after, so that downstream can use maybe_get_output
6304 // to retrieve the output
6305 }
6306 void set_output_raw_strided(
6307 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6308 TensorOptions options, DimnameList names
6309 ) override {
6310 auto current_device = guard_.current_device();
6311 if (C10_UNLIKELY(current_device.has_value())) {
6312 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6313 "structured kernels don't support multi-device outputs");
6314 } else {
6315 guard_.reset_device(options.device());
6316 }
6317 outputs_[output_idx] = create_out(sizes, strides, options);
6318 if (!names.empty()) {
6319 namedinference::propagate_names(*outputs_[output_idx], names);
6320 }
6321 // super must happen after, so that downstream can use maybe_get_output
6322 // to retrieve the output
6323 }
6324 const Tensor& maybe_get_output(int64_t output_idx) override {
6325 return *outputs_[output_idx];
6326 }
6327 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6328 c10::OptionalDeviceGuard guard_;
6329};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_mean_dim(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  structured_mean_dim_default_backend_functional op;
  op.meta(self, dim, keepdim, dtype);
  at::mean_outf(self, dim, keepdim, dtype, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6336struct structured_min_dim_default_backend_functional final : public at::meta::structured_min_dim {
6337 void set_output_strided(
6338 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6339 TensorOptions options, DimnameList names
6340 ) override {
6341 auto current_device = guard_.current_device();
6342 if (C10_UNLIKELY(current_device.has_value())) {
6343 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6344 "structured kernels don't support multi-device outputs");
6345 } else {
6346 guard_.reset_device(options.device());
6347 }
6348 outputs_[output_idx] = create_out(sizes, strides, options);
6349 if (!names.empty()) {
6350 namedinference::propagate_names(*outputs_[output_idx], names);
6351 }
6352 // super must happen after, so that downstream can use maybe_get_output
6353 // to retrieve the output
6354 }
6355 void set_output_raw_strided(
6356 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6357 TensorOptions options, DimnameList names
6358 ) override {
6359 auto current_device = guard_.current_device();
6360 if (C10_UNLIKELY(current_device.has_value())) {
6361 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6362 "structured kernels don't support multi-device outputs");
6363 } else {
6364 guard_.reset_device(options.device());
6365 }
6366 outputs_[output_idx] = create_out(sizes, strides, options);
6367 if (!names.empty()) {
6368 namedinference::propagate_names(*outputs_[output_idx], names);
6369 }
6370 // super must happen after, so that downstream can use maybe_get_output
6371 // to retrieve the output
6372 }
6373 const Tensor& maybe_get_output(int64_t output_idx) override {
6374 return *outputs_[output_idx];
6375 }
6376 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
6377 c10::OptionalDeviceGuard guard_;
6378};
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_min_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
  structured_min_dim_default_backend_functional op;
  auto precompute = op.meta(self, dim, keepdim);
  (void)precompute;
  at::min_outf(self, precompute.dim, keepdim, *op.outputs_[0], *op.outputs_[1]);
  return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
}
6386struct structured_amin_default_backend_functional final : public at::meta::structured_amin {
6387 void set_output_strided(
6388 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6389 TensorOptions options, DimnameList names
6390 ) override {
6391 auto current_device = guard_.current_device();
6392 if (C10_UNLIKELY(current_device.has_value())) {
6393 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6394 "structured kernels don't support multi-device outputs");
6395 } else {
6396 guard_.reset_device(options.device());
6397 }
6398 outputs_[output_idx] = create_out(sizes, strides, options);
6399 if (!names.empty()) {
6400 namedinference::propagate_names(*outputs_[output_idx], names);
6401 }
6402 // super must happen after, so that downstream can use maybe_get_output
6403 // to retrieve the output
6404 }
6405 void set_output_raw_strided(
6406 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6407 TensorOptions options, DimnameList names
6408 ) override {
6409 auto current_device = guard_.current_device();
6410 if (C10_UNLIKELY(current_device.has_value())) {
6411 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6412 "structured kernels don't support multi-device outputs");
6413 } else {
6414 guard_.reset_device(options.device());
6415 }
6416 outputs_[output_idx] = create_out(sizes, strides, options);
6417 if (!names.empty()) {
6418 namedinference::propagate_names(*outputs_[output_idx], names);
6419 }
6420 // super must happen after, so that downstream can use maybe_get_output
6421 // to retrieve the output
6422 }
6423 const Tensor& maybe_get_output(int64_t output_idx) override {
6424 return *outputs_[output_idx];
6425 }
6426 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6427 c10::OptionalDeviceGuard guard_;
6428};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_amin(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  structured_amin_default_backend_functional op;
  op.meta(self, dim, keepdim);
  at::amin_outf(self, dim, keepdim, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6435struct structured_mm_default_backend_functional final : public at::meta::structured_mm {
6436 void set_output_strided(
6437 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6438 TensorOptions options, DimnameList names
6439 ) override {
6440 auto current_device = guard_.current_device();
6441 if (C10_UNLIKELY(current_device.has_value())) {
6442 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6443 "structured kernels don't support multi-device outputs");
6444 } else {
6445 guard_.reset_device(options.device());
6446 }
6447 outputs_[output_idx] = create_out(sizes, strides, options);
6448 if (!names.empty()) {
6449 namedinference::propagate_names(*outputs_[output_idx], names);
6450 }
6451 // super must happen after, so that downstream can use maybe_get_output
6452 // to retrieve the output
6453 }
6454 void set_output_raw_strided(
6455 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6456 TensorOptions options, DimnameList names
6457 ) override {
6458 auto current_device = guard_.current_device();
6459 if (C10_UNLIKELY(current_device.has_value())) {
6460 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6461 "structured kernels don't support multi-device outputs");
6462 } else {
6463 guard_.reset_device(options.device());
6464 }
6465 outputs_[output_idx] = create_out(sizes, strides, options);
6466 if (!names.empty()) {
6467 namedinference::propagate_names(*outputs_[output_idx], names);
6468 }
6469 // super must happen after, so that downstream can use maybe_get_output
6470 // to retrieve the output
6471 }
6472 const Tensor& maybe_get_output(int64_t output_idx) override {
6473 return *outputs_[output_idx];
6474 }
6475 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6476 c10::OptionalDeviceGuard guard_;
6477};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_mm(const at::Tensor & self, const at::Tensor & mat2) {
  structured_mm_default_backend_functional op;
  op.meta(self, mat2);
  at::mm_outf(self, mat2, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6484struct structured_mul_Tensor_default_backend_functional final : public at::meta::structured_mul_Tensor {
6485 void set_output_strided(
6486 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6487 TensorOptions options, DimnameList names
6488 ) override {
6489 auto current_device = guard_.current_device();
6490 if (C10_UNLIKELY(current_device.has_value())) {
6491 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6492 "structured kernels don't support multi-device outputs");
6493 } else {
6494 guard_.reset_device(options.device());
6495 }
6496 outputs_[output_idx] = create_out(sizes, strides, options);
6497 if (!names.empty()) {
6498 namedinference::propagate_names(*outputs_[output_idx], names);
6499 }
6500 // super must happen after, so that downstream can use maybe_get_output
6501 // to retrieve the output
6502 at::meta::structured_mul_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
6503 }
6504 void set_output_raw_strided(
6505 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6506 TensorOptions options, DimnameList names
6507 ) override {
6508 auto current_device = guard_.current_device();
6509 if (C10_UNLIKELY(current_device.has_value())) {
6510 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6511 "structured kernels don't support multi-device outputs");
6512 } else {
6513 guard_.reset_device(options.device());
6514 }
6515 outputs_[output_idx] = create_out(sizes, strides, options);
6516 if (!names.empty()) {
6517 namedinference::propagate_names(*outputs_[output_idx], names);
6518 }
6519 // super must happen after, so that downstream can use maybe_get_output
6520 // to retrieve the output
6521 at::meta::structured_mul_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
6522 }
6523 const Tensor& maybe_get_output(int64_t output_idx) override {
6524 return *outputs_[output_idx];
6525 }
6526 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6527 c10::OptionalDeviceGuard guard_;
6528};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_mul_Tensor(const at::Tensor & self, const at::Tensor & other) {
  structured_mul_Tensor_default_backend_functional op;
  op.meta(self, other);
  at::mul_outf(self, other, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6535struct structured_mul_Tensor_default_backend_inplace final : public at::meta::structured_mul_Tensor {
6536 structured_mul_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6537 void set_output_strided(
6538 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6539 TensorOptions options, DimnameList names
6540 ) override {
6541 auto current_device = guard_.current_device();
6542 if (C10_UNLIKELY(current_device.has_value())) {
6543 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6544 "structured kernels don't support multi-device outputs");
6545 } else {
6546 guard_.reset_device(options.device());
6547 }
6548 const auto& out = outputs_[output_idx].get();
6549 check_inplace(out, sizes, options);
6550 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6551 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6552 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6553 }
6554 if (!names.empty()) {
6555 namedinference::propagate_names(outputs_[output_idx], names);
6556 }
6557 // super must happen after, so that downstream can use maybe_get_output
6558 // to retrieve the output
6559 at::meta::structured_mul_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
6560 }
6561 void set_output_raw_strided(
6562 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6563 TensorOptions options, DimnameList names
6564 ) override {
6565 auto current_device = guard_.current_device();
6566 if (C10_UNLIKELY(current_device.has_value())) {
6567 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6568 "structured kernels don't support multi-device outputs");
6569 } else {
6570 guard_.reset_device(options.device());
6571 }
6572 const auto& out = outputs_[output_idx].get();
6573 check_inplace(out, sizes, options);
6574 if (!names.empty()) {
6575 namedinference::propagate_names(outputs_[output_idx], names);
6576 }
6577 // super must happen after, so that downstream can use maybe_get_output
6578 // to retrieve the output
6579 at::meta::structured_mul_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
6580 }
6581 const Tensor& maybe_get_output(int64_t output_idx) override {
6582 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6583 }
6584 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6585 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6586 c10::OptionalDeviceGuard guard_;
6587};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_mul__Tensor(at::Tensor & self, const at::Tensor & other) {
  structured_mul_Tensor_default_backend_inplace op(self);
  op.meta(self, other);
  at::mul_outf(self, other, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  // No device check
  // DeviceGuard omitted
  return at::native::narrow_copy_dense_symint(self, dim, start, length);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::math_pixel_shuffle(self, upscale_factor);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::math_pixel_unshuffle(self, downscale_factor);
}
} // anonymous namespace
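// These static wrappers only become reachable once they are registered for
// the CompositeExplicitAutogradNonFunctional dispatch key, which the code
// generator does later in this file. A rough sketch of what that registration
// block looks like (illustrative only; the exact contents are not shown in
// this excerpt):
//
//   TORCH_LIBRARY_IMPL(aten, CompositeExplicitAutogradNonFunctional, m) {
//     m.impl("narrow_copy", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__narrow_copy));
//     m.impl("pixel_shuffle", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__pixel_shuffle));
//     // ... one m.impl(...) line per wrapper defined above ...
//   }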
6616struct structured_reciprocal_default_backend_functional final : public at::meta::structured_reciprocal {
6617 void set_output_strided(
6618 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6619 TensorOptions options, DimnameList names
6620 ) override {
6621 auto current_device = guard_.current_device();
6622 if (C10_UNLIKELY(current_device.has_value())) {
6623 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6624 "structured kernels don't support multi-device outputs");
6625 } else {
6626 guard_.reset_device(options.device());
6627 }
6628 outputs_[output_idx] = create_out(sizes, strides, options);
6629 if (!names.empty()) {
6630 namedinference::propagate_names(*outputs_[output_idx], names);
6631 }
6632 // super must happen after, so that downstream can use maybe_get_output
6633 // to retrieve the output
6634 at::meta::structured_reciprocal::set_output_raw_strided(output_idx, sizes, strides, options, names);
6635 }
6636 void set_output_raw_strided(
6637 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6638 TensorOptions options, DimnameList names
6639 ) override {
6640 auto current_device = guard_.current_device();
6641 if (C10_UNLIKELY(current_device.has_value())) {
6642 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6643 "structured kernels don't support multi-device outputs");
6644 } else {
6645 guard_.reset_device(options.device());
6646 }
6647 outputs_[output_idx] = create_out(sizes, strides, options);
6648 if (!names.empty()) {
6649 namedinference::propagate_names(*outputs_[output_idx], names);
6650 }
6651 // super must happen after, so that downstream can use maybe_get_output
6652 // to retrieve the output
6653 at::meta::structured_reciprocal::set_output_raw_strided(output_idx, sizes, strides, options, names);
6654 }
6655 const Tensor& maybe_get_output(int64_t output_idx) override {
6656 return *outputs_[output_idx];
6657 }
6658 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6659 c10::OptionalDeviceGuard guard_;
6660};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_reciprocal(const at::Tensor & self) {
  structured_reciprocal_default_backend_functional op;
  op.meta(self);
  at::reciprocal_outf(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6667struct structured_reciprocal_default_backend_inplace final : public at::meta::structured_reciprocal {
6668 structured_reciprocal_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6669 void set_output_strided(
6670 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6671 TensorOptions options, DimnameList names
6672 ) override {
6673 auto current_device = guard_.current_device();
6674 if (C10_UNLIKELY(current_device.has_value())) {
6675 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6676 "structured kernels don't support multi-device outputs");
6677 } else {
6678 guard_.reset_device(options.device());
6679 }
6680 const auto& out = outputs_[output_idx].get();
6681 check_inplace(out, sizes, options);
6682 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6683 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6684 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6685 }
6686 if (!names.empty()) {
6687 namedinference::propagate_names(outputs_[output_idx], names);
6688 }
6689 // super must happen after, so that downstream can use maybe_get_output
6690 // to retrieve the output
6691 at::meta::structured_reciprocal::set_output_raw_strided(output_idx, sizes, strides, options, names);
6692 }
6693 void set_output_raw_strided(
6694 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6695 TensorOptions options, DimnameList names
6696 ) override {
6697 auto current_device = guard_.current_device();
6698 if (C10_UNLIKELY(current_device.has_value())) {
6699 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6700 "structured kernels don't support multi-device outputs");
6701 } else {
6702 guard_.reset_device(options.device());
6703 }
6704 const auto& out = outputs_[output_idx].get();
6705 check_inplace(out, sizes, options);
6706 if (!names.empty()) {
6707 namedinference::propagate_names(outputs_[output_idx], names);
6708 }
6709 // super must happen after, so that downstream can use maybe_get_output
6710 // to retrieve the output
6711 at::meta::structured_reciprocal::set_output_raw_strided(output_idx, sizes, strides, options, names);
6712 }
6713 const Tensor& maybe_get_output(int64_t output_idx) override {
6714 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6715 }
6716 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6717 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6718 c10::OptionalDeviceGuard guard_;
6719};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_reciprocal_(at::Tensor & self) {
  structured_reciprocal_default_backend_inplace op(self);
  op.meta(self);
  at::reciprocal_outf(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
6727struct structured_neg_default_backend_functional final : public at::meta::structured_neg {
6728 void set_output_strided(
6729 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6730 TensorOptions options, DimnameList names
6731 ) override {
6732 auto current_device = guard_.current_device();
6733 if (C10_UNLIKELY(current_device.has_value())) {
6734 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6735 "structured kernels don't support multi-device outputs");
6736 } else {
6737 guard_.reset_device(options.device());
6738 }
6739 outputs_[output_idx] = create_out(sizes, strides, options);
6740 if (!names.empty()) {
6741 namedinference::propagate_names(*outputs_[output_idx], names);
6742 }
6743 // super must happen after, so that downstream can use maybe_get_output
6744 // to retrieve the output
6745 at::meta::structured_neg::set_output_raw_strided(output_idx, sizes, strides, options, names);
6746 }
6747 void set_output_raw_strided(
6748 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6749 TensorOptions options, DimnameList names
6750 ) override {
6751 auto current_device = guard_.current_device();
6752 if (C10_UNLIKELY(current_device.has_value())) {
6753 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6754 "structured kernels don't support multi-device outputs");
6755 } else {
6756 guard_.reset_device(options.device());
6757 }
6758 outputs_[output_idx] = create_out(sizes, strides, options);
6759 if (!names.empty()) {
6760 namedinference::propagate_names(*outputs_[output_idx], names);
6761 }
6762 // super must happen after, so that downstream can use maybe_get_output
6763 // to retrieve the output
6764 at::meta::structured_neg::set_output_raw_strided(output_idx, sizes, strides, options, names);
6765 }
6766 const Tensor& maybe_get_output(int64_t output_idx) override {
6767 return *outputs_[output_idx];
6768 }
6769 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6770 c10::OptionalDeviceGuard guard_;
6771};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_neg(const at::Tensor & self) {
  structured_neg_default_backend_functional op;
  op.meta(self);
  at::neg_outf(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6778struct structured_neg_default_backend_inplace final : public at::meta::structured_neg {
6779 structured_neg_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6780 void set_output_strided(
6781 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6782 TensorOptions options, DimnameList names
6783 ) override {
6784 auto current_device = guard_.current_device();
6785 if (C10_UNLIKELY(current_device.has_value())) {
6786 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6787 "structured kernels don't support multi-device outputs");
6788 } else {
6789 guard_.reset_device(options.device());
6790 }
6791 const auto& out = outputs_[output_idx].get();
6792 check_inplace(out, sizes, options);
6793 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6794 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6795 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6796 }
6797 if (!names.empty()) {
6798 namedinference::propagate_names(outputs_[output_idx], names);
6799 }
6800 // super must happen after, so that downstream can use maybe_get_output
6801 // to retrieve the output
6802 at::meta::structured_neg::set_output_raw_strided(output_idx, sizes, strides, options, names);
6803 }
6804 void set_output_raw_strided(
6805 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6806 TensorOptions options, DimnameList names
6807 ) override {
6808 auto current_device = guard_.current_device();
6809 if (C10_UNLIKELY(current_device.has_value())) {
6810 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6811 "structured kernels don't support multi-device outputs");
6812 } else {
6813 guard_.reset_device(options.device());
6814 }
6815 const auto& out = outputs_[output_idx].get();
6816 check_inplace(out, sizes, options);
6817 if (!names.empty()) {
6818 namedinference::propagate_names(outputs_[output_idx], names);
6819 }
6820 // super must happen after, so that downstream can use maybe_get_output
6821 // to retrieve the output
6822 at::meta::structured_neg::set_output_raw_strided(output_idx, sizes, strides, options, names);
6823 }
6824 const Tensor& maybe_get_output(int64_t output_idx) override {
6825 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6826 }
6827 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6828 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6829 c10::OptionalDeviceGuard guard_;
6830};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_neg_(at::Tensor & self) {
  structured_neg_default_backend_inplace op(self);
  op.meta(self);
  at::neg_outf(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
6838struct structured_round_default_backend_functional final : public at::meta::structured_round {
6839 void set_output_strided(
6840 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6841 TensorOptions options, DimnameList names
6842 ) override {
6843 auto current_device = guard_.current_device();
6844 if (C10_UNLIKELY(current_device.has_value())) {
6845 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6846 "structured kernels don't support multi-device outputs");
6847 } else {
6848 guard_.reset_device(options.device());
6849 }
6850 outputs_[output_idx] = create_out(sizes, strides, options);
6851 if (!names.empty()) {
6852 namedinference::propagate_names(*outputs_[output_idx], names);
6853 }
6854 // super must happen after, so that downstream can use maybe_get_output
6855 // to retrieve the output
6856 at::meta::structured_round::set_output_raw_strided(output_idx, sizes, strides, options, names);
6857 }
6858 void set_output_raw_strided(
6859 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6860 TensorOptions options, DimnameList names
6861 ) override {
6862 auto current_device = guard_.current_device();
6863 if (C10_UNLIKELY(current_device.has_value())) {
6864 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6865 "structured kernels don't support multi-device outputs");
6866 } else {
6867 guard_.reset_device(options.device());
6868 }
6869 outputs_[output_idx] = create_out(sizes, strides, options);
6870 if (!names.empty()) {
6871 namedinference::propagate_names(*outputs_[output_idx], names);
6872 }
6873 // super must happen after, so that downstream can use maybe_get_output
6874 // to retrieve the output
6875 at::meta::structured_round::set_output_raw_strided(output_idx, sizes, strides, options, names);
6876 }
6877 const Tensor& maybe_get_output(int64_t output_idx) override {
6878 return *outputs_[output_idx];
6879 }
6880 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6881 c10::OptionalDeviceGuard guard_;
6882};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_round(const at::Tensor & self) {
  structured_round_default_backend_functional op;
  op.meta(self);
  at::round_outf(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
6889struct structured_round_default_backend_inplace final : public at::meta::structured_round {
6890 structured_round_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6891 void set_output_strided(
6892 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6893 TensorOptions options, DimnameList names
6894 ) override {
6895 auto current_device = guard_.current_device();
6896 if (C10_UNLIKELY(current_device.has_value())) {
6897 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6898 "structured kernels don't support multi-device outputs");
6899 } else {
6900 guard_.reset_device(options.device());
6901 }
6902 const auto& out = outputs_[output_idx].get();
6903 check_inplace(out, sizes, options);
6904 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6905 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6906 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6907 }
6908 if (!names.empty()) {
6909 namedinference::propagate_names(outputs_[output_idx], names);
6910 }
6911 // super must happen after, so that downstream can use maybe_get_output
6912 // to retrieve the output
6913 at::meta::structured_round::set_output_raw_strided(output_idx, sizes, strides, options, names);
6914 }
6915 void set_output_raw_strided(
6916 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6917 TensorOptions options, DimnameList names
6918 ) override {
6919 auto current_device = guard_.current_device();
6920 if (C10_UNLIKELY(current_device.has_value())) {
6921 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6922 "structured kernels don't support multi-device outputs");
6923 } else {
6924 guard_.reset_device(options.device());
6925 }
6926 const auto& out = outputs_[output_idx].get();
6927 check_inplace(out, sizes, options);
6928 if (!names.empty()) {
6929 namedinference::propagate_names(outputs_[output_idx], names);
6930 }
6931 // super must happen after, so that downstream can use maybe_get_output
6932 // to retrieve the output
6933 at::meta::structured_round::set_output_raw_strided(output_idx, sizes, strides, options, names);
6934 }
6935 const Tensor& maybe_get_output(int64_t output_idx) override {
6936 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6937 }
6938 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6939 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6940 c10::OptionalDeviceGuard guard_;
6941};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_round_(at::Tensor & self) {
  structured_round_default_backend_inplace op(self);
  op.meta(self);
  at::round_outf(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
6949struct structured_round_decimals_default_backend_functional final : public at::meta::structured_round_decimals {
6950 void set_output_strided(
6951 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6952 TensorOptions options, DimnameList names
6953 ) override {
6954 auto current_device = guard_.current_device();
6955 if (C10_UNLIKELY(current_device.has_value())) {
6956 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6957 "structured kernels don't support multi-device outputs");
6958 } else {
6959 guard_.reset_device(options.device());
6960 }
6961 outputs_[output_idx] = create_out(sizes, strides, options);
6962 if (!names.empty()) {
6963 namedinference::propagate_names(*outputs_[output_idx], names);
6964 }
6965 // super must happen after, so that downstream can use maybe_get_output
6966 // to retrieve the output
6967 at::meta::structured_round_decimals::set_output_raw_strided(output_idx, sizes, strides, options, names);
6968 }
6969 void set_output_raw_strided(
6970 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6971 TensorOptions options, DimnameList names
6972 ) override {
6973 auto current_device = guard_.current_device();
6974 if (C10_UNLIKELY(current_device.has_value())) {
6975 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
6976 "structured kernels don't support multi-device outputs");
6977 } else {
6978 guard_.reset_device(options.device());
6979 }
6980 outputs_[output_idx] = create_out(sizes, strides, options);
6981 if (!names.empty()) {
6982 namedinference::propagate_names(*outputs_[output_idx], names);
6983 }
6984 // super must happen after, so that downstream can use maybe_get_output
6985 // to retrieve the output
6986 at::meta::structured_round_decimals::set_output_raw_strided(output_idx, sizes, strides, options, names);
6987 }
6988 const Tensor& maybe_get_output(int64_t output_idx) override {
6989 return *outputs_[output_idx];
6990 }
6991 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6992 c10::OptionalDeviceGuard guard_;
6993};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_round_decimals(const at::Tensor & self, int64_t decimals) {
  structured_round_decimals_default_backend_functional op;
  op.meta(self, decimals);
  at::round_outf(self, decimals, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
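// Note: the unqualified at::round_outf call above resolves to the
// round.decimals overload (self, decimals, out) purely by argument count, so
// the round and round.decimals wrappers can spell the redispatch the same way.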
7000struct structured_round_decimals_default_backend_inplace final : public at::meta::structured_round_decimals {
7001 structured_round_decimals_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7002 void set_output_strided(
7003 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7004 TensorOptions options, DimnameList names
7005 ) override {
7006 auto current_device = guard_.current_device();
7007 if (C10_UNLIKELY(current_device.has_value())) {
7008 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7009 "structured kernels don't support multi-device outputs");
7010 } else {
7011 guard_.reset_device(options.device());
7012 }
7013 const auto& out = outputs_[output_idx].get();
7014 check_inplace(out, sizes, options);
7015 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7016 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7017 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7018 }
7019 if (!names.empty()) {
7020 namedinference::propagate_names(outputs_[output_idx], names);
7021 }
7022 // super must happen after, so that downstream can use maybe_get_output
7023 // to retrieve the output
7024 at::meta::structured_round_decimals::set_output_raw_strided(output_idx, sizes, strides, options, names);
7025 }
7026 void set_output_raw_strided(
7027 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7028 TensorOptions options, DimnameList names
7029 ) override {
7030 auto current_device = guard_.current_device();
7031 if (C10_UNLIKELY(current_device.has_value())) {
7032 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7033 "structured kernels don't support multi-device outputs");
7034 } else {
7035 guard_.reset_device(options.device());
7036 }
7037 const auto& out = outputs_[output_idx].get();
7038 check_inplace(out, sizes, options);
7039 if (!names.empty()) {
7040 namedinference::propagate_names(outputs_[output_idx], names);
7041 }
7042 // super must happen after, so that downstream can use maybe_get_output
7043 // to retrieve the output
7044 at::meta::structured_round_decimals::set_output_raw_strided(output_idx, sizes, strides, options, names);
7045 }
7046 const Tensor& maybe_get_output(int64_t output_idx) override {
7047 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7048 }
7049 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7050 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7051 c10::OptionalDeviceGuard guard_;
7052};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_round__decimals(at::Tensor & self, int64_t decimals) {
  structured_round_decimals_default_backend_inplace op(self);
  op.meta(self, decimals);
  at::round_outf(self, decimals, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
7060struct structured_gelu_default_backend_functional final : public at::meta::structured_gelu {
7061 void set_output_strided(
7062 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7063 TensorOptions options, DimnameList names
7064 ) override {
7065 auto current_device = guard_.current_device();
7066 if (C10_UNLIKELY(current_device.has_value())) {
7067 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7068 "structured kernels don't support multi-device outputs");
7069 } else {
7070 guard_.reset_device(options.device());
7071 }
7072 outputs_[output_idx] = create_out(sizes, strides, options);
7073 if (!names.empty()) {
7074 namedinference::propagate_names(*outputs_[output_idx], names);
7075 }
7076 // super must happen after, so that downstream can use maybe_get_output
7077 // to retrieve the output
7078 at::meta::structured_gelu::set_output_raw_strided(output_idx, sizes, strides, options, names);
7079 }
7080 void set_output_raw_strided(
7081 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7082 TensorOptions options, DimnameList names
7083 ) override {
7084 auto current_device = guard_.current_device();
7085 if (C10_UNLIKELY(current_device.has_value())) {
7086 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7087 "structured kernels don't support multi-device outputs");
7088 } else {
7089 guard_.reset_device(options.device());
7090 }
7091 outputs_[output_idx] = create_out(sizes, strides, options);
7092 if (!names.empty()) {
7093 namedinference::propagate_names(*outputs_[output_idx], names);
7094 }
7095 // super must happen after, so that downstream can use maybe_get_output
7096 // to retrieve the output
7097 at::meta::structured_gelu::set_output_raw_strided(output_idx, sizes, strides, options, names);
7098 }
7099 const Tensor& maybe_get_output(int64_t output_idx) override {
7100 return *outputs_[output_idx];
7101 }
7102 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7103 c10::OptionalDeviceGuard guard_;
7104};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_gelu(const at::Tensor & self, c10::string_view approximate) {
  structured_gelu_default_backend_functional op;
  op.meta(self, approximate);
  at::gelu_outf(self, approximate, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
7111struct structured_gelu_default_backend_inplace final : public at::meta::structured_gelu {
7112 structured_gelu_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7113 void set_output_strided(
7114 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7115 TensorOptions options, DimnameList names
7116 ) override {
7117 auto current_device = guard_.current_device();
7118 if (C10_UNLIKELY(current_device.has_value())) {
7119 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7120 "structured kernels don't support multi-device outputs");
7121 } else {
7122 guard_.reset_device(options.device());
7123 }
7124 const auto& out = outputs_[output_idx].get();
7125 check_inplace(out, sizes, options);
7126 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7127 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7128 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7129 }
7130 if (!names.empty()) {
7131 namedinference::propagate_names(outputs_[output_idx], names);
7132 }
7133 // super must happen after, so that downstream can use maybe_get_output
7134 // to retrieve the output
7135 at::meta::structured_gelu::set_output_raw_strided(output_idx, sizes, strides, options, names);
7136 }
7137 void set_output_raw_strided(
7138 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7139 TensorOptions options, DimnameList names
7140 ) override {
7141 auto current_device = guard_.current_device();
7142 if (C10_UNLIKELY(current_device.has_value())) {
7143 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7144 "structured kernels don't support multi-device outputs");
7145 } else {
7146 guard_.reset_device(options.device());
7147 }
7148 const auto& out = outputs_[output_idx].get();
7149 check_inplace(out, sizes, options);
7150 if (!names.empty()) {
7151 namedinference::propagate_names(outputs_[output_idx], names);
7152 }
7153 // super must happen after, so that downstream can use maybe_get_output
7154 // to retrieve the output
7155 at::meta::structured_gelu::set_output_raw_strided(output_idx, sizes, strides, options, names);
7156 }
7157 const Tensor& maybe_get_output(int64_t output_idx) override {
7158 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7159 }
7160 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7161 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7162 c10::OptionalDeviceGuard guard_;
7163};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_gelu_(at::Tensor & self, c10::string_view approximate) {
  structured_gelu_default_backend_inplace op(self);
  op.meta(self, approximate);
  at::gelu_outf(self, approximate, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
struct structured_gelu_backward_default_backend_functional final : public at::meta::structured_gelu_backward {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_gelu_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_gelu_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
structured_gelu_backward_default_backend_functional op;
op.meta(grad_output, self, approximate);
at::gelu_backward_outf(grad_output, self, approximate, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_hardshrink_default_backend_functional final : public at::meta::structured_hardshrink {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_hardshrink::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_hardshrink::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_hardshrink(const at::Tensor & self, const at::Scalar & lambd) {
structured_hardshrink_default_backend_functional op;
op.meta(self, lambd);
at::hardshrink_outf(self, lambd, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_hardshrink_backward_default_backend_functional final : public at::meta::structured_hardshrink_backward {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_hardshrink_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_hardshrink_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
structured_hardshrink_backward_default_backend_functional op;
op.meta(grad_out, self, lambd);
at::hardshrink_backward_outf(grad_out, self, lambd, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_rsqrt_default_backend_functional final : public at::meta::structured_rsqrt {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_rsqrt::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_rsqrt::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_rsqrt(const at::Tensor & self) {
structured_rsqrt_default_backend_functional op;
op.meta(self);
at::rsqrt_outf(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_rsqrt_default_backend_inplace final : public at::meta::structured_rsqrt {
  structured_rsqrt_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_rsqrt::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_rsqrt::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_rsqrt_(at::Tensor & self) {
structured_rsqrt_default_backend_inplace op(self);
op.meta(self);
at::rsqrt_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
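// NOTE: the wrapper in the anonymous namespace below does not go through a structured
// meta/out= kernel; it forwards directly to the composite at::native::select_backward_symint
// implementation, with no device check and no device guard.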
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__select_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
  // No device check
  // DeviceGuard omitted
  return at::native::select_backward_symint(grad_output, input_sizes, dim, index);
}
} // anonymous namespace
struct structured_silu_default_backend_functional final : public at::meta::structured_silu {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_silu::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_silu::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_silu(const at::Tensor & self) {
structured_silu_default_backend_functional op;
op.meta(self);
at::silu_outf(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_silu_default_backend_inplace final : public at::meta::structured_silu {
  structured_silu_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_silu::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
        "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_silu::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_silu_(at::Tensor & self) {
structured_silu_default_backend_inplace op(self);
op.meta(self);
at::silu_outf(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
7553struct structured_silu_backward_default_backend_functional final : public at::meta::structured_silu_backward {
7554 void set_output_strided(
7555 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7556 TensorOptions options, DimnameList names
7557 ) override {
7558 auto current_device = guard_.current_device();
7559 if (C10_UNLIKELY(current_device.has_value())) {
7560 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7561 "structured kernels don't support multi-device outputs");
7562 } else {
7563 guard_.reset_device(options.device());
7564 }
7565 outputs_[output_idx] = create_out(sizes, strides, options);
7566 if (!names.empty()) {
7567 namedinference::propagate_names(*outputs_[output_idx], names);
7568 }
7569 // super must happen after, so that downstream can use maybe_get_output
7570 // to retrieve the output
7571 at::meta::structured_silu_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
7572 }
7573 void set_output_raw_strided(
7574 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7575 TensorOptions options, DimnameList names
7576 ) override {
7577 auto current_device = guard_.current_device();
7578 if (C10_UNLIKELY(current_device.has_value())) {
7579 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7580 "structured kernels don't support multi-device outputs");
7581 } else {
7582 guard_.reset_device(options.device());
7583 }
7584 outputs_[output_idx] = create_out(sizes, strides, options);
7585 if (!names.empty()) {
7586 namedinference::propagate_names(*outputs_[output_idx], names);
7587 }
7588 // super must happen after, so that downstream can use maybe_get_output
7589 // to retrieve the output
7590 at::meta::structured_silu_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
7591 }
7592 const Tensor& maybe_get_output(int64_t output_idx) override {
7593 return *outputs_[output_idx];
7594 }
7595 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7596 c10::OptionalDeviceGuard guard_;
7597};
7598at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
7599structured_silu_backward_default_backend_functional op;
7600op.meta(grad_output, self);
7601at::silu_backward_outf(grad_output, self, *op.outputs_[0]);
7602return std::move(op.outputs_[0]).take();
7603}
7604struct structured_mish_default_backend_functional final : public at::meta::structured_mish {
7605 void set_output_strided(
7606 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7607 TensorOptions options, DimnameList names
7608 ) override {
7609 auto current_device = guard_.current_device();
7610 if (C10_UNLIKELY(current_device.has_value())) {
7611 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7612 "structured kernels don't support multi-device outputs");
7613 } else {
7614 guard_.reset_device(options.device());
7615 }
7616 outputs_[output_idx] = create_out(sizes, strides, options);
7617 if (!names.empty()) {
7618 namedinference::propagate_names(*outputs_[output_idx], names);
7619 }
7620 // super must happen after, so that downstream can use maybe_get_output
7621 // to retrieve the output
7622 at::meta::structured_mish::set_output_raw_strided(output_idx, sizes, strides, options, names);
7623 }
7624 void set_output_raw_strided(
7625 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7626 TensorOptions options, DimnameList names
7627 ) override {
7628 auto current_device = guard_.current_device();
7629 if (C10_UNLIKELY(current_device.has_value())) {
7630 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7631 "structured kernels don't support multi-device outputs");
7632 } else {
7633 guard_.reset_device(options.device());
7634 }
7635 outputs_[output_idx] = create_out(sizes, strides, options);
7636 if (!names.empty()) {
7637 namedinference::propagate_names(*outputs_[output_idx], names);
7638 }
7639 // super must happen after, so that downstream can use maybe_get_output
7640 // to retrieve the output
7641 at::meta::structured_mish::set_output_raw_strided(output_idx, sizes, strides, options, names);
7642 }
7643 const Tensor& maybe_get_output(int64_t output_idx) override {
7644 return *outputs_[output_idx];
7645 }
7646 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7647 c10::OptionalDeviceGuard guard_;
7648};
7649at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_mish(const at::Tensor & self) {
7650structured_mish_default_backend_functional op;
7651op.meta(self);
7652at::mish_outf(self, *op.outputs_[0]);
7653return std::move(op.outputs_[0]).take();
7654}
7655struct structured_mish_default_backend_inplace final : public at::meta::structured_mish {
7656 structured_mish_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7657 void set_output_strided(
7658 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7659 TensorOptions options, DimnameList names
7660 ) override {
7661 auto current_device = guard_.current_device();
7662 if (C10_UNLIKELY(current_device.has_value())) {
7663 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7664 "structured kernels don't support multi-device outputs");
7665 } else {
7666 guard_.reset_device(options.device());
7667 }
7668 const auto& out = outputs_[output_idx].get();
7669 check_inplace(out, sizes, options);
7670 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7671 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7672 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7673 }
7674 if (!names.empty()) {
7675 namedinference::propagate_names(outputs_[output_idx], names);
7676 }
7677 // super must happen after, so that downstream can use maybe_get_output
7678 // to retrieve the output
7679 at::meta::structured_mish::set_output_raw_strided(output_idx, sizes, strides, options, names);
7680 }
7681 void set_output_raw_strided(
7682 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7683 TensorOptions options, DimnameList names
7684 ) override {
7685 auto current_device = guard_.current_device();
7686 if (C10_UNLIKELY(current_device.has_value())) {
7687 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7688 "structured kernels don't support multi-device outputs");
7689 } else {
7690 guard_.reset_device(options.device());
7691 }
7692 const auto& out = outputs_[output_idx].get();
7693 check_inplace(out, sizes, options);
7694 if (!names.empty()) {
7695 namedinference::propagate_names(outputs_[output_idx], names);
7696 }
7697 // super must happen after, so that downstream can use maybe_get_output
7698 // to retrieve the output
7699 at::meta::structured_mish::set_output_raw_strided(output_idx, sizes, strides, options, names);
7700 }
7701 const Tensor& maybe_get_output(int64_t output_idx) override {
7702 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7703 }
7704 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7705 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7706 c10::OptionalDeviceGuard guard_;
7707};
7708at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_mish_(at::Tensor & self) {
7709structured_mish_default_backend_inplace op(self);
7710op.meta(self);
7711at::mish_outf(self, op.outputs_[0]);
7712if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7713return self;
7714}
7715struct structured_sigmoid_default_backend_functional final : public at::meta::structured_sigmoid {
7716 void set_output_strided(
7717 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7718 TensorOptions options, DimnameList names
7719 ) override {
7720 auto current_device = guard_.current_device();
7721 if (C10_UNLIKELY(current_device.has_value())) {
7722 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7723 "structured kernels don't support multi-device outputs");
7724 } else {
7725 guard_.reset_device(options.device());
7726 }
7727 outputs_[output_idx] = create_out(sizes, strides, options);
7728 if (!names.empty()) {
7729 namedinference::propagate_names(*outputs_[output_idx], names);
7730 }
7731 // super must happen after, so that downstream can use maybe_get_output
7732 // to retrieve the output
7733 at::meta::structured_sigmoid::set_output_raw_strided(output_idx, sizes, strides, options, names);
7734 }
7735 void set_output_raw_strided(
7736 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7737 TensorOptions options, DimnameList names
7738 ) override {
7739 auto current_device = guard_.current_device();
7740 if (C10_UNLIKELY(current_device.has_value())) {
7741 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7742 "structured kernels don't support multi-device outputs");
7743 } else {
7744 guard_.reset_device(options.device());
7745 }
7746 outputs_[output_idx] = create_out(sizes, strides, options);
7747 if (!names.empty()) {
7748 namedinference::propagate_names(*outputs_[output_idx], names);
7749 }
7750 // super must happen after, so that downstream can use maybe_get_output
7751 // to retrieve the output
7752 at::meta::structured_sigmoid::set_output_raw_strided(output_idx, sizes, strides, options, names);
7753 }
7754 const Tensor& maybe_get_output(int64_t output_idx) override {
7755 return *outputs_[output_idx];
7756 }
7757 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7758 c10::OptionalDeviceGuard guard_;
7759};
7760at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sigmoid(const at::Tensor & self) {
7761structured_sigmoid_default_backend_functional op;
7762op.meta(self);
7763at::sigmoid_outf(self, *op.outputs_[0]);
7764return std::move(op.outputs_[0]).take();
7765}
7766struct structured_sigmoid_default_backend_inplace final : public at::meta::structured_sigmoid {
7767 structured_sigmoid_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7768 void set_output_strided(
7769 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7770 TensorOptions options, DimnameList names
7771 ) override {
7772 auto current_device = guard_.current_device();
7773 if (C10_UNLIKELY(current_device.has_value())) {
7774 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7775 "structured kernels don't support multi-device outputs");
7776 } else {
7777 guard_.reset_device(options.device());
7778 }
7779 const auto& out = outputs_[output_idx].get();
7780 check_inplace(out, sizes, options);
7781 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7782 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7783 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7784 }
7785 if (!names.empty()) {
7786 namedinference::propagate_names(outputs_[output_idx], names);
7787 }
7788 // super must happen after, so that downstream can use maybe_get_output
7789 // to retrieve the output
7790 at::meta::structured_sigmoid::set_output_raw_strided(output_idx, sizes, strides, options, names);
7791 }
7792 void set_output_raw_strided(
7793 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7794 TensorOptions options, DimnameList names
7795 ) override {
7796 auto current_device = guard_.current_device();
7797 if (C10_UNLIKELY(current_device.has_value())) {
7798 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7799 "structured kernels don't support multi-device outputs");
7800 } else {
7801 guard_.reset_device(options.device());
7802 }
7803 const auto& out = outputs_[output_idx].get();
7804 check_inplace(out, sizes, options);
7805 if (!names.empty()) {
7806 namedinference::propagate_names(outputs_[output_idx], names);
7807 }
7808 // super must happen after, so that downstream can use maybe_get_output
7809 // to retrieve the output
7810 at::meta::structured_sigmoid::set_output_raw_strided(output_idx, sizes, strides, options, names);
7811 }
7812 const Tensor& maybe_get_output(int64_t output_idx) override {
7813 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7814 }
7815 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7816 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7817 c10::OptionalDeviceGuard guard_;
7818};
7819at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_sigmoid_(at::Tensor & self) {
7820structured_sigmoid_default_backend_inplace op(self);
7821op.meta(self);
7822at::sigmoid_outf(self, op.outputs_[0]);
7823if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7824return self;
7825}
7826struct structured_sin_default_backend_functional final : public at::meta::structured_sin {
7827 void set_output_strided(
7828 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7829 TensorOptions options, DimnameList names
7830 ) override {
7831 auto current_device = guard_.current_device();
7832 if (C10_UNLIKELY(current_device.has_value())) {
7833 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7834 "structured kernels don't support multi-device outputs");
7835 } else {
7836 guard_.reset_device(options.device());
7837 }
7838 outputs_[output_idx] = create_out(sizes, strides, options);
7839 if (!names.empty()) {
7840 namedinference::propagate_names(*outputs_[output_idx], names);
7841 }
7842 // super must happen after, so that downstream can use maybe_get_output
7843 // to retrieve the output
7844 at::meta::structured_sin::set_output_raw_strided(output_idx, sizes, strides, options, names);
7845 }
7846 void set_output_raw_strided(
7847 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7848 TensorOptions options, DimnameList names
7849 ) override {
7850 auto current_device = guard_.current_device();
7851 if (C10_UNLIKELY(current_device.has_value())) {
7852 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7853 "structured kernels don't support multi-device outputs");
7854 } else {
7855 guard_.reset_device(options.device());
7856 }
7857 outputs_[output_idx] = create_out(sizes, strides, options);
7858 if (!names.empty()) {
7859 namedinference::propagate_names(*outputs_[output_idx], names);
7860 }
7861 // super must happen after, so that downstream can use maybe_get_output
7862 // to retrieve the output
7863 at::meta::structured_sin::set_output_raw_strided(output_idx, sizes, strides, options, names);
7864 }
7865 const Tensor& maybe_get_output(int64_t output_idx) override {
7866 return *outputs_[output_idx];
7867 }
7868 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7869 c10::OptionalDeviceGuard guard_;
7870};
7871at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sin(const at::Tensor & self) {
7872structured_sin_default_backend_functional op;
7873op.meta(self);
7874at::sin_outf(self, *op.outputs_[0]);
7875return std::move(op.outputs_[0]).take();
7876}
7877struct structured_sin_default_backend_inplace final : public at::meta::structured_sin {
7878 structured_sin_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7879 void set_output_strided(
7880 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7881 TensorOptions options, DimnameList names
7882 ) override {
7883 auto current_device = guard_.current_device();
7884 if (C10_UNLIKELY(current_device.has_value())) {
7885 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7886 "structured kernels don't support multi-device outputs");
7887 } else {
7888 guard_.reset_device(options.device());
7889 }
7890 const auto& out = outputs_[output_idx].get();
7891 check_inplace(out, sizes, options);
7892 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7893 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7894 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7895 }
7896 if (!names.empty()) {
7897 namedinference::propagate_names(outputs_[output_idx], names);
7898 }
7899 // super must happen after, so that downstream can use maybe_get_output
7900 // to retrieve the output
7901 at::meta::structured_sin::set_output_raw_strided(output_idx, sizes, strides, options, names);
7902 }
7903 void set_output_raw_strided(
7904 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7905 TensorOptions options, DimnameList names
7906 ) override {
7907 auto current_device = guard_.current_device();
7908 if (C10_UNLIKELY(current_device.has_value())) {
7909 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7910 "structured kernels don't support multi-device outputs");
7911 } else {
7912 guard_.reset_device(options.device());
7913 }
7914 const auto& out = outputs_[output_idx].get();
7915 check_inplace(out, sizes, options);
7916 if (!names.empty()) {
7917 namedinference::propagate_names(outputs_[output_idx], names);
7918 }
7919 // super must happen after, so that downstream can use maybe_get_output
7920 // to retrieve the output
7921 at::meta::structured_sin::set_output_raw_strided(output_idx, sizes, strides, options, names);
7922 }
7923 const Tensor& maybe_get_output(int64_t output_idx) override {
7924 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7925 }
7926 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7927 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7928 c10::OptionalDeviceGuard guard_;
7929};
7930at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_sin_(at::Tensor & self) {
7931structured_sin_default_backend_inplace op(self);
7932op.meta(self);
7933at::sin_outf(self, op.outputs_[0]);
7934if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7935return self;
7936}
7937struct structured_sinc_default_backend_functional final : public at::meta::structured_sinc {
7938 void set_output_strided(
7939 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7940 TensorOptions options, DimnameList names
7941 ) override {
7942 auto current_device = guard_.current_device();
7943 if (C10_UNLIKELY(current_device.has_value())) {
7944 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7945 "structured kernels don't support multi-device outputs");
7946 } else {
7947 guard_.reset_device(options.device());
7948 }
7949 outputs_[output_idx] = create_out(sizes, strides, options);
7950 if (!names.empty()) {
7951 namedinference::propagate_names(*outputs_[output_idx], names);
7952 }
7953 // super must happen after, so that downstream can use maybe_get_output
7954 // to retrieve the output
7955 at::meta::structured_sinc::set_output_raw_strided(output_idx, sizes, strides, options, names);
7956 }
7957 void set_output_raw_strided(
7958 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7959 TensorOptions options, DimnameList names
7960 ) override {
7961 auto current_device = guard_.current_device();
7962 if (C10_UNLIKELY(current_device.has_value())) {
7963 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7964 "structured kernels don't support multi-device outputs");
7965 } else {
7966 guard_.reset_device(options.device());
7967 }
7968 outputs_[output_idx] = create_out(sizes, strides, options);
7969 if (!names.empty()) {
7970 namedinference::propagate_names(*outputs_[output_idx], names);
7971 }
7972 // super must happen after, so that downstream can use maybe_get_output
7973 // to retrieve the output
7974 at::meta::structured_sinc::set_output_raw_strided(output_idx, sizes, strides, options, names);
7975 }
7976 const Tensor& maybe_get_output(int64_t output_idx) override {
7977 return *outputs_[output_idx];
7978 }
7979 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7980 c10::OptionalDeviceGuard guard_;
7981};
7982at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sinc(const at::Tensor & self) {
7983structured_sinc_default_backend_functional op;
7984op.meta(self);
7985at::sinc_outf(self, *op.outputs_[0]);
7986return std::move(op.outputs_[0]).take();
7987}
7988struct structured_sinc_default_backend_inplace final : public at::meta::structured_sinc {
7989 structured_sinc_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7990 void set_output_strided(
7991 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7992 TensorOptions options, DimnameList names
7993 ) override {
7994 auto current_device = guard_.current_device();
7995 if (C10_UNLIKELY(current_device.has_value())) {
7996 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
7997 "structured kernels don't support multi-device outputs");
7998 } else {
7999 guard_.reset_device(options.device());
8000 }
8001 const auto& out = outputs_[output_idx].get();
8002 check_inplace(out, sizes, options);
8003 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8004 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8005 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8006 }
8007 if (!names.empty()) {
8008 namedinference::propagate_names(outputs_[output_idx], names);
8009 }
8010 // super must happen after, so that downstream can use maybe_get_output
8011 // to retrieve the output
8012 at::meta::structured_sinc::set_output_raw_strided(output_idx, sizes, strides, options, names);
8013 }
8014 void set_output_raw_strided(
8015 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8016 TensorOptions options, DimnameList names
8017 ) override {
8018 auto current_device = guard_.current_device();
8019 if (C10_UNLIKELY(current_device.has_value())) {
8020 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8021 "structured kernels don't support multi-device outputs");
8022 } else {
8023 guard_.reset_device(options.device());
8024 }
8025 const auto& out = outputs_[output_idx].get();
8026 check_inplace(out, sizes, options);
8027 if (!names.empty()) {
8028 namedinference::propagate_names(outputs_[output_idx], names);
8029 }
8030 // super must happen after, so that downstream can use maybe_get_output
8031 // to retrieve the output
8032 at::meta::structured_sinc::set_output_raw_strided(output_idx, sizes, strides, options, names);
8033 }
8034 const Tensor& maybe_get_output(int64_t output_idx) override {
8035 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8036 }
8037 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8038 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8039 c10::OptionalDeviceGuard guard_;
8040};
8041at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_sinc_(at::Tensor & self) {
8042structured_sinc_default_backend_inplace op(self);
8043op.meta(self);
8044at::sinc_outf(self, op.outputs_[0]);
8045if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8046return self;
8047}
8048struct structured_sinh_default_backend_functional final : public at::meta::structured_sinh {
8049 void set_output_strided(
8050 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8051 TensorOptions options, DimnameList names
8052 ) override {
8053 auto current_device = guard_.current_device();
8054 if (C10_UNLIKELY(current_device.has_value())) {
8055 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8056 "structured kernels don't support multi-device outputs");
8057 } else {
8058 guard_.reset_device(options.device());
8059 }
8060 outputs_[output_idx] = create_out(sizes, strides, options);
8061 if (!names.empty()) {
8062 namedinference::propagate_names(*outputs_[output_idx], names);
8063 }
8064 // super must happen after, so that downstream can use maybe_get_output
8065 // to retrieve the output
8066 at::meta::structured_sinh::set_output_raw_strided(output_idx, sizes, strides, options, names);
8067 }
8068 void set_output_raw_strided(
8069 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8070 TensorOptions options, DimnameList names
8071 ) override {
8072 auto current_device = guard_.current_device();
8073 if (C10_UNLIKELY(current_device.has_value())) {
8074 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8075 "structured kernels don't support multi-device outputs");
8076 } else {
8077 guard_.reset_device(options.device());
8078 }
8079 outputs_[output_idx] = create_out(sizes, strides, options);
8080 if (!names.empty()) {
8081 namedinference::propagate_names(*outputs_[output_idx], names);
8082 }
8083 // super must happen after, so that downstream can use maybe_get_output
8084 // to retrieve the output
8085 at::meta::structured_sinh::set_output_raw_strided(output_idx, sizes, strides, options, names);
8086 }
8087 const Tensor& maybe_get_output(int64_t output_idx) override {
8088 return *outputs_[output_idx];
8089 }
8090 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8091 c10::OptionalDeviceGuard guard_;
8092};
8093at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sinh(const at::Tensor & self) {
8094structured_sinh_default_backend_functional op;
8095op.meta(self);
8096at::sinh_outf(self, *op.outputs_[0]);
8097return std::move(op.outputs_[0]).take();
8098}
8099struct structured_sinh_default_backend_inplace final : public at::meta::structured_sinh {
8100 structured_sinh_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8101 void set_output_strided(
8102 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8103 TensorOptions options, DimnameList names
8104 ) override {
8105 auto current_device = guard_.current_device();
8106 if (C10_UNLIKELY(current_device.has_value())) {
8107 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8108 "structured kernels don't support multi-device outputs");
8109 } else {
8110 guard_.reset_device(options.device());
8111 }
8112 const auto& out = outputs_[output_idx].get();
8113 check_inplace(out, sizes, options);
8114 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8115 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8116 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8117 }
8118 if (!names.empty()) {
8119 namedinference::propagate_names(outputs_[output_idx], names);
8120 }
8121 // super must happen after, so that downstream can use maybe_get_output
8122 // to retrieve the output
8123 at::meta::structured_sinh::set_output_raw_strided(output_idx, sizes, strides, options, names);
8124 }
8125 void set_output_raw_strided(
8126 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8127 TensorOptions options, DimnameList names
8128 ) override {
8129 auto current_device = guard_.current_device();
8130 if (C10_UNLIKELY(current_device.has_value())) {
8131 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8132 "structured kernels don't support multi-device outputs");
8133 } else {
8134 guard_.reset_device(options.device());
8135 }
8136 const auto& out = outputs_[output_idx].get();
8137 check_inplace(out, sizes, options);
8138 if (!names.empty()) {
8139 namedinference::propagate_names(outputs_[output_idx], names);
8140 }
8141 // super must happen after, so that downstream can use maybe_get_output
8142 // to retrieve the output
8143 at::meta::structured_sinh::set_output_raw_strided(output_idx, sizes, strides, options, names);
8144 }
8145 const Tensor& maybe_get_output(int64_t output_idx) override {
8146 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8147 }
8148 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8149 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8150 c10::OptionalDeviceGuard guard_;
8151};
8152at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_sinh_(at::Tensor & self) {
8153structured_sinh_default_backend_inplace op(self);
8154op.meta(self);
8155at::sinh_outf(self, op.outputs_[0]);
8156if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8157return self;
8158}
8159struct structured__softmax_default_backend_functional final : public at::meta::structured__softmax {
8160 void set_output_strided(
8161 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8162 TensorOptions options, DimnameList names
8163 ) override {
8164 auto current_device = guard_.current_device();
8165 if (C10_UNLIKELY(current_device.has_value())) {
8166 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8167 "structured kernels don't support multi-device outputs");
8168 } else {
8169 guard_.reset_device(options.device());
8170 }
8171 outputs_[output_idx] = create_out(sizes, strides, options);
8172 if (!names.empty()) {
8173 namedinference::propagate_names(*outputs_[output_idx], names);
8174 }
8175 // super must happen after, so that downstream can use maybe_get_output
8176 // to retrieve the output
8177 }
8178 void set_output_raw_strided(
8179 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8180 TensorOptions options, DimnameList names
8181 ) override {
8182 auto current_device = guard_.current_device();
8183 if (C10_UNLIKELY(current_device.has_value())) {
8184 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8185 "structured kernels don't support multi-device outputs");
8186 } else {
8187 guard_.reset_device(options.device());
8188 }
8189 outputs_[output_idx] = create_out(sizes, strides, options);
8190 if (!names.empty()) {
8191 namedinference::propagate_names(*outputs_[output_idx], names);
8192 }
8193 // super must happen after, so that downstream can use maybe_get_output
8194 // to retrieve the output
8195 }
8196 const Tensor& maybe_get_output(int64_t output_idx) override {
8197 return *outputs_[output_idx];
8198 }
8199 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8200 c10::OptionalDeviceGuard guard_;
8201};
8202at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
8203structured__softmax_default_backend_functional op;
8204op.meta(self, dim, half_to_float);
8205at::_softmax_outf(self, dim, half_to_float, *op.outputs_[0]);
8206return std::move(op.outputs_[0]).take();
8207}
8208struct structured__softmax_backward_data_default_backend_functional final : public at::meta::structured__softmax_backward_data {
8209 void set_output_strided(
8210 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8211 TensorOptions options, DimnameList names
8212 ) override {
8213 auto current_device = guard_.current_device();
8214 if (C10_UNLIKELY(current_device.has_value())) {
8215 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8216 "structured kernels don't support multi-device outputs");
8217 } else {
8218 guard_.reset_device(options.device());
8219 }
8220 outputs_[output_idx] = create_out(sizes, strides, options);
8221 if (!names.empty()) {
8222 namedinference::propagate_names(*outputs_[output_idx], names);
8223 }
8224 // super must happen after, so that downstream can use maybe_get_output
8225 // to retrieve the output
8226 }
8227 void set_output_raw_strided(
8228 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8229 TensorOptions options, DimnameList names
8230 ) override {
8231 auto current_device = guard_.current_device();
8232 if (C10_UNLIKELY(current_device.has_value())) {
8233 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8234 "structured kernels don't support multi-device outputs");
8235 } else {
8236 guard_.reset_device(options.device());
8237 }
8238 outputs_[output_idx] = create_out(sizes, strides, options);
8239 if (!names.empty()) {
8240 namedinference::propagate_names(*outputs_[output_idx], names);
8241 }
8242 // super must happen after, so that downstream can use maybe_get_output
8243 // to retrieve the output
8244 }
8245 const Tensor& maybe_get_output(int64_t output_idx) override {
8246 return *outputs_[output_idx];
8247 }
8248 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8249 c10::OptionalDeviceGuard guard_;
8250};
8251at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
8252structured__softmax_backward_data_default_backend_functional op;
8253op.meta(grad_output, output, dim, input_dtype);
8254at::_softmax_backward_data_outf(grad_output, output, dim, input_dtype, *op.outputs_[0]);
8255return std::move(op.outputs_[0]).take();
8256}
8257struct structured_sum_dim_IntList_default_backend_functional final : public at::meta::structured_sum_dim_IntList {
8258 void set_output_strided(
8259 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8260 TensorOptions options, DimnameList names
8261 ) override {
8262 auto current_device = guard_.current_device();
8263 if (C10_UNLIKELY(current_device.has_value())) {
8264 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8265 "structured kernels don't support multi-device outputs");
8266 } else {
8267 guard_.reset_device(options.device());
8268 }
8269 outputs_[output_idx] = create_out(sizes, strides, options);
8270 if (!names.empty()) {
8271 namedinference::propagate_names(*outputs_[output_idx], names);
8272 }
8273 // super must happen after, so that downstream can use maybe_get_output
8274 // to retrieve the output
8275 }
8276 void set_output_raw_strided(
8277 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8278 TensorOptions options, DimnameList names
8279 ) override {
8280 auto current_device = guard_.current_device();
8281 if (C10_UNLIKELY(current_device.has_value())) {
8282 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8283 "structured kernels don't support multi-device outputs");
8284 } else {
8285 guard_.reset_device(options.device());
8286 }
8287 outputs_[output_idx] = create_out(sizes, strides, options);
8288 if (!names.empty()) {
8289 namedinference::propagate_names(*outputs_[output_idx], names);
8290 }
8291 // super must happen after, so that downstream can use maybe_get_output
8292 // to retrieve the output
8293 }
8294 const Tensor& maybe_get_output(int64_t output_idx) override {
8295 return *outputs_[output_idx];
8296 }
8297 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8298 c10::OptionalDeviceGuard guard_;
8299};
8300at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sum_dim_IntList(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
8301structured_sum_dim_IntList_default_backend_functional op;
8302op.meta(self, dim, keepdim, dtype);
8303at::sum_outf(self, dim, keepdim, dtype, *op.outputs_[0]);
8304return std::move(op.outputs_[0]).take();
8305}
8306struct structured_sqrt_default_backend_functional final : public at::meta::structured_sqrt {
8307 void set_output_strided(
8308 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8309 TensorOptions options, DimnameList names
8310 ) override {
8311 auto current_device = guard_.current_device();
8312 if (C10_UNLIKELY(current_device.has_value())) {
8313 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8314 "structured kernels don't support multi-device outputs");
8315 } else {
8316 guard_.reset_device(options.device());
8317 }
8318 outputs_[output_idx] = create_out(sizes, strides, options);
8319 if (!names.empty()) {
8320 namedinference::propagate_names(*outputs_[output_idx], names);
8321 }
8322 // super must happen after, so that downstream can use maybe_get_output
8323 // to retrieve the output
8324 at::meta::structured_sqrt::set_output_raw_strided(output_idx, sizes, strides, options, names);
8325 }
8326 void set_output_raw_strided(
8327 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8328 TensorOptions options, DimnameList names
8329 ) override {
8330 auto current_device = guard_.current_device();
8331 if (C10_UNLIKELY(current_device.has_value())) {
8332 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8333 "structured kernels don't support multi-device outputs");
8334 } else {
8335 guard_.reset_device(options.device());
8336 }
8337 outputs_[output_idx] = create_out(sizes, strides, options);
8338 if (!names.empty()) {
8339 namedinference::propagate_names(*outputs_[output_idx], names);
8340 }
8341 // super must happen after, so that downstream can use maybe_get_output
8342 // to retrieve the output
8343 at::meta::structured_sqrt::set_output_raw_strided(output_idx, sizes, strides, options, names);
8344 }
8345 const Tensor& maybe_get_output(int64_t output_idx) override {
8346 return *outputs_[output_idx];
8347 }
8348 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8349 c10::OptionalDeviceGuard guard_;
8350};
8351at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sqrt(const at::Tensor & self) {
8352structured_sqrt_default_backend_functional op;
8353op.meta(self);
8354at::sqrt_outf(self, *op.outputs_[0]);
8355return std::move(op.outputs_[0]).take();
8356}
8357struct structured_sqrt_default_backend_inplace final : public at::meta::structured_sqrt {
8358 structured_sqrt_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8359 void set_output_strided(
8360 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8361 TensorOptions options, DimnameList names
8362 ) override {
8363 auto current_device = guard_.current_device();
8364 if (C10_UNLIKELY(current_device.has_value())) {
8365 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8366 "structured kernels don't support multi-device outputs");
8367 } else {
8368 guard_.reset_device(options.device());
8369 }
8370 const auto& out = outputs_[output_idx].get();
8371 check_inplace(out, sizes, options);
8372 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8373 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8374 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8375 }
8376 if (!names.empty()) {
8377 namedinference::propagate_names(outputs_[output_idx], names);
8378 }
8379 // super must happen after, so that downstream can use maybe_get_output
8380 // to retrieve the output
8381 at::meta::structured_sqrt::set_output_raw_strided(output_idx, sizes, strides, options, names);
8382 }
8383 void set_output_raw_strided(
8384 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8385 TensorOptions options, DimnameList names
8386 ) override {
8387 auto current_device = guard_.current_device();
8388 if (C10_UNLIKELY(current_device.has_value())) {
8389 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8390 "structured kernels don't support multi-device outputs");
8391 } else {
8392 guard_.reset_device(options.device());
8393 }
8394 const auto& out = outputs_[output_idx].get();
8395 check_inplace(out, sizes, options);
8396 if (!names.empty()) {
8397 namedinference::propagate_names(outputs_[output_idx], names);
8398 }
8399 // super must happen after, so that downstream can use maybe_get_output
8400 // to retrieve the output
8401 at::meta::structured_sqrt::set_output_raw_strided(output_idx, sizes, strides, options, names);
8402 }
8403 const Tensor& maybe_get_output(int64_t output_idx) override {
8404 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8405 }
8406 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8407 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8408 c10::OptionalDeviceGuard guard_;
8409};
8410at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_sqrt_(at::Tensor & self) {
8411structured_sqrt_default_backend_inplace op(self);
8412op.meta(self);
8413at::sqrt_outf(self, op.outputs_[0]);
8414if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8415return self;
8416}
8417struct structured_prod_dim_int_default_backend_functional final : public at::meta::structured_prod_dim_int {
8418 void set_output_strided(
8419 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8420 TensorOptions options, DimnameList names
8421 ) override {
8422 auto current_device = guard_.current_device();
8423 if (C10_UNLIKELY(current_device.has_value())) {
8424 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8425 "structured kernels don't support multi-device outputs");
8426 } else {
8427 guard_.reset_device(options.device());
8428 }
8429 outputs_[output_idx] = create_out(sizes, strides, options);
8430 if (!names.empty()) {
8431 namedinference::propagate_names(*outputs_[output_idx], names);
8432 }
8433 // super must happen after, so that downstream can use maybe_get_output
8434 // to retrieve the output
8435 }
8436 void set_output_raw_strided(
8437 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8438 TensorOptions options, DimnameList names
8439 ) override {
8440 auto current_device = guard_.current_device();
8441 if (C10_UNLIKELY(current_device.has_value())) {
8442 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8443 "structured kernels don't support multi-device outputs");
8444 } else {
8445 guard_.reset_device(options.device());
8446 }
8447 outputs_[output_idx] = create_out(sizes, strides, options);
8448 if (!names.empty()) {
8449 namedinference::propagate_names(*outputs_[output_idx], names);
8450 }
8451 // super must happen after, so that downstream can use maybe_get_output
8452 // to retrieve the output
8453 }
8454 const Tensor& maybe_get_output(int64_t output_idx) override {
8455 return *outputs_[output_idx];
8456 }
8457 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8458 c10::OptionalDeviceGuard guard_;
8459};
8460at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_prod_dim_int(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
8461structured_prod_dim_int_default_backend_functional op;
8462op.meta(self, dim, keepdim, dtype);
8463at::prod_outf(self, dim, keepdim, dtype, *op.outputs_[0]);
8464return std::move(op.outputs_[0]).take();
8465}
8466struct structured_tan_default_backend_functional final : public at::meta::structured_tan {
8467 void set_output_strided(
8468 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8469 TensorOptions options, DimnameList names
8470 ) override {
8471 auto current_device = guard_.current_device();
8472 if (C10_UNLIKELY(current_device.has_value())) {
8473 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8474 "structured kernels don't support multi-device outputs");
8475 } else {
8476 guard_.reset_device(options.device());
8477 }
8478 outputs_[output_idx] = create_out(sizes, strides, options);
8479 if (!names.empty()) {
8480 namedinference::propagate_names(*outputs_[output_idx], names);
8481 }
8482 // super must happen after, so that downstream can use maybe_get_output
8483 // to retrieve the output
8484 at::meta::structured_tan::set_output_raw_strided(output_idx, sizes, strides, options, names);
8485 }
8486 void set_output_raw_strided(
8487 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8488 TensorOptions options, DimnameList names
8489 ) override {
8490 auto current_device = guard_.current_device();
8491 if (C10_UNLIKELY(current_device.has_value())) {
8492 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8493 "structured kernels don't support multi-device outputs");
8494 } else {
8495 guard_.reset_device(options.device());
8496 }
8497 outputs_[output_idx] = create_out(sizes, strides, options);
8498 if (!names.empty()) {
8499 namedinference::propagate_names(*outputs_[output_idx], names);
8500 }
8501 // super must happen after, so that downstream can use maybe_get_output
8502 // to retrieve the output
8503 at::meta::structured_tan::set_output_raw_strided(output_idx, sizes, strides, options, names);
8504 }
8505 const Tensor& maybe_get_output(int64_t output_idx) override {
8506 return *outputs_[output_idx];
8507 }
8508 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8509 c10::OptionalDeviceGuard guard_;
8510};
8511at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_tan(const at::Tensor & self) {
8512structured_tan_default_backend_functional op;
8513op.meta(self);
8514at::tan_outf(self, *op.outputs_[0]);
8515return std::move(op.outputs_[0]).take();
8516}
8517struct structured_tan_default_backend_inplace final : public at::meta::structured_tan {
8518 structured_tan_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8519 void set_output_strided(
8520 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8521 TensorOptions options, DimnameList names
8522 ) override {
8523 auto current_device = guard_.current_device();
8524 if (C10_UNLIKELY(current_device.has_value())) {
8525 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8526 "structured kernels don't support multi-device outputs");
8527 } else {
8528 guard_.reset_device(options.device());
8529 }
8530 const auto& out = outputs_[output_idx].get();
8531 check_inplace(out, sizes, options);
8532 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8533 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8534 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8535 }
8536 if (!names.empty()) {
8537 namedinference::propagate_names(outputs_[output_idx], names);
8538 }
8539 // super must happen after, so that downstream can use maybe_get_output
8540 // to retrieve the output
8541 at::meta::structured_tan::set_output_raw_strided(output_idx, sizes, strides, options, names);
8542 }
8543 void set_output_raw_strided(
8544 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8545 TensorOptions options, DimnameList names
8546 ) override {
8547 auto current_device = guard_.current_device();
8548 if (C10_UNLIKELY(current_device.has_value())) {
8549 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8550 "structured kernels don't support multi-device outputs");
8551 } else {
8552 guard_.reset_device(options.device());
8553 }
8554 const auto& out = outputs_[output_idx].get();
8555 check_inplace(out, sizes, options);
8556 if (!names.empty()) {
8557 namedinference::propagate_names(outputs_[output_idx], names);
8558 }
8559 // super must happen after, so that downstream can use maybe_get_output
8560 // to retrieve the output
8561 at::meta::structured_tan::set_output_raw_strided(output_idx, sizes, strides, options, names);
8562 }
8563 const Tensor& maybe_get_output(int64_t output_idx) override {
8564 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8565 }
8566 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8567 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8568 c10::OptionalDeviceGuard guard_;
8569};
8570at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_tan_(at::Tensor & self) {
8571structured_tan_default_backend_inplace op(self);
8572op.meta(self);
8573at::tan_outf(self, op.outputs_[0]);
8574if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8575return self;
8576}
8577struct structured_tanh_default_backend_functional final : public at::meta::structured_tanh {
8578 void set_output_strided(
8579 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8580 TensorOptions options, DimnameList names
8581 ) override {
8582 auto current_device = guard_.current_device();
8583 if (C10_UNLIKELY(current_device.has_value())) {
8584 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8585 "structured kernels don't support multi-device outputs");
8586 } else {
8587 guard_.reset_device(options.device());
8588 }
8589 outputs_[output_idx] = create_out(sizes, strides, options);
8590 if (!names.empty()) {
8591 namedinference::propagate_names(*outputs_[output_idx], names);
8592 }
8593 // super must happen after, so that downstream can use maybe_get_output
8594 // to retrieve the output
8595 at::meta::structured_tanh::set_output_raw_strided(output_idx, sizes, strides, options, names);
8596 }
8597 void set_output_raw_strided(
8598 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8599 TensorOptions options, DimnameList names
8600 ) override {
8601 auto current_device = guard_.current_device();
8602 if (C10_UNLIKELY(current_device.has_value())) {
8603 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8604 "structured kernels don't support multi-device outputs");
8605 } else {
8606 guard_.reset_device(options.device());
8607 }
8608 outputs_[output_idx] = create_out(sizes, strides, options);
8609 if (!names.empty()) {
8610 namedinference::propagate_names(*outputs_[output_idx], names);
8611 }
8612 // super must happen after, so that downstream can use maybe_get_output
8613 // to retrieve the output
8614 at::meta::structured_tanh::set_output_raw_strided(output_idx, sizes, strides, options, names);
8615 }
8616 const Tensor& maybe_get_output(int64_t output_idx) override {
8617 return *outputs_[output_idx];
8618 }
8619 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8620 c10::OptionalDeviceGuard guard_;
8621};
8622at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_tanh(const at::Tensor & self) {
8623structured_tanh_default_backend_functional op;
8624op.meta(self);
8625at::tanh_outf(self, *op.outputs_[0]);
8626return std::move(op.outputs_[0]).take();
8627}
8628struct structured_tanh_default_backend_inplace final : public at::meta::structured_tanh {
8629 structured_tanh_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8630 void set_output_strided(
8631 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8632 TensorOptions options, DimnameList names
8633 ) override {
8634 auto current_device = guard_.current_device();
8635 if (C10_UNLIKELY(current_device.has_value())) {
8636 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8637 "structured kernels don't support multi-device outputs");
8638 } else {
8639 guard_.reset_device(options.device());
8640 }
8641 const auto& out = outputs_[output_idx].get();
8642 check_inplace(out, sizes, options);
8643 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8644 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8645 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8646 }
8647 if (!names.empty()) {
8648 namedinference::propagate_names(outputs_[output_idx], names);
8649 }
8650 // super must happen after, so that downstream can use maybe_get_output
8651 // to retrieve the output
8652 at::meta::structured_tanh::set_output_raw_strided(output_idx, sizes, strides, options, names);
8653 }
8654 void set_output_raw_strided(
8655 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8656 TensorOptions options, DimnameList names
8657 ) override {
8658 auto current_device = guard_.current_device();
8659 if (C10_UNLIKELY(current_device.has_value())) {
8660 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8661 "structured kernels don't support multi-device outputs");
8662 } else {
8663 guard_.reset_device(options.device());
8664 }
8665 const auto& out = outputs_[output_idx].get();
8666 check_inplace(out, sizes, options);
8667 if (!names.empty()) {
8668 namedinference::propagate_names(outputs_[output_idx], names);
8669 }
8670 // super must happen after, so that downstream can use maybe_get_output
8671 // to retrieve the output
8672 at::meta::structured_tanh::set_output_raw_strided(output_idx, sizes, strides, options, names);
8673 }
8674 const Tensor& maybe_get_output(int64_t output_idx) override {
8675 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8676 }
8677 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8678 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8679 c10::OptionalDeviceGuard guard_;
8680};
8681at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_tanh_(at::Tensor & self) {
8682structured_tanh_default_backend_inplace op(self);
8683op.meta(self);
8684at::tanh_outf(self, op.outputs_[0]);
8685if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8686return self;
8687}
8688struct structured_threshold_default_backend_functional final : public at::meta::structured_threshold {
8689 void set_output_strided(
8690 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8691 TensorOptions options, DimnameList names
8692 ) override {
8693 auto current_device = guard_.current_device();
8694 if (C10_UNLIKELY(current_device.has_value())) {
8695 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8696 "structured kernels don't support multi-device outputs");
8697 } else {
8698 guard_.reset_device(options.device());
8699 }
8700 outputs_[output_idx] = create_out(sizes, strides, options);
8701 if (!names.empty()) {
8702 namedinference::propagate_names(*outputs_[output_idx], names);
8703 }
8704 // super must happen after, so that downstream can use maybe_get_output
8705 // to retrieve the output
8706 at::meta::structured_threshold::set_output_raw_strided(output_idx, sizes, strides, options, names);
8707 }
8708 void set_output_raw_strided(
8709 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8710 TensorOptions options, DimnameList names
8711 ) override {
8712 auto current_device = guard_.current_device();
8713 if (C10_UNLIKELY(current_device.has_value())) {
8714 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8715 "structured kernels don't support multi-device outputs");
8716 } else {
8717 guard_.reset_device(options.device());
8718 }
8719 outputs_[output_idx] = create_out(sizes, strides, options);
8720 if (!names.empty()) {
8721 namedinference::propagate_names(*outputs_[output_idx], names);
8722 }
8723 // super must happen after, so that downstream can use maybe_get_output
8724 // to retrieve the output
8725 at::meta::structured_threshold::set_output_raw_strided(output_idx, sizes, strides, options, names);
8726 }
8727 const Tensor& maybe_get_output(int64_t output_idx) override {
8728 return *outputs_[output_idx];
8729 }
8730 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8731 c10::OptionalDeviceGuard guard_;
8732};
8733at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
8734structured_threshold_default_backend_functional op;
8735op.meta(self, threshold, value);
8736at::threshold_outf(self, threshold, value, *op.outputs_[0]);
8737return std::move(op.outputs_[0]).take();
8738}
8739struct structured_threshold_default_backend_inplace final : public at::meta::structured_threshold {
8740 structured_threshold_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8741 void set_output_strided(
8742 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8743 TensorOptions options, DimnameList names
8744 ) override {
8745 auto current_device = guard_.current_device();
8746 if (C10_UNLIKELY(current_device.has_value())) {
8747 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8748 "structured kernels don't support multi-device outputs");
8749 } else {
8750 guard_.reset_device(options.device());
8751 }
8752 const auto& out = outputs_[output_idx].get();
8753 check_inplace(out, sizes, options);
8754 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8755 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8756 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8757 }
8758 if (!names.empty()) {
8759 namedinference::propagate_names(outputs_[output_idx], names);
8760 }
8761 // super must happen after, so that downstream can use maybe_get_output
8762 // to retrieve the output
8763 at::meta::structured_threshold::set_output_raw_strided(output_idx, sizes, strides, options, names);
8764 }
8765 void set_output_raw_strided(
8766 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8767 TensorOptions options, DimnameList names
8768 ) override {
8769 auto current_device = guard_.current_device();
8770 if (C10_UNLIKELY(current_device.has_value())) {
8771 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8772 "structured kernels don't support multi-device outputs");
8773 } else {
8774 guard_.reset_device(options.device());
8775 }
8776 const auto& out = outputs_[output_idx].get();
8777 check_inplace(out, sizes, options);
8778 if (!names.empty()) {
8779 namedinference::propagate_names(outputs_[output_idx], names);
8780 }
8781 // super must happen after, so that downstream can use maybe_get_output
8782 // to retrieve the output
8783 at::meta::structured_threshold::set_output_raw_strided(output_idx, sizes, strides, options, names);
8784 }
8785 const Tensor& maybe_get_output(int64_t output_idx) override {
8786 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8787 }
8788 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8789 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8790 c10::OptionalDeviceGuard guard_;
8791};
8792at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
8793structured_threshold_default_backend_inplace op(self);
8794op.meta(self, threshold, value);
8795at::threshold_outf(self, threshold, value, op.outputs_[0]);
8796if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8797return self;
8798}
8799struct structured_threshold_backward_default_backend_functional final : public at::meta::structured_threshold_backward {
8800 void set_output_strided(
8801 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8802 TensorOptions options, DimnameList names
8803 ) override {
8804 auto current_device = guard_.current_device();
8805 if (C10_UNLIKELY(current_device.has_value())) {
8806 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8807 "structured kernels don't support multi-device outputs");
8808 } else {
8809 guard_.reset_device(options.device());
8810 }
8811 outputs_[output_idx] = create_out(sizes, strides, options);
8812 if (!names.empty()) {
8813 namedinference::propagate_names(*outputs_[output_idx], names);
8814 }
8815 // super must happen after, so that downstream can use maybe_get_output
8816 // to retrieve the output
8817 at::meta::structured_threshold_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
8818 }
8819 void set_output_raw_strided(
8820 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8821 TensorOptions options, DimnameList names
8822 ) override {
8823 auto current_device = guard_.current_device();
8824 if (C10_UNLIKELY(current_device.has_value())) {
8825 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8826 "structured kernels don't support multi-device outputs");
8827 } else {
8828 guard_.reset_device(options.device());
8829 }
8830 outputs_[output_idx] = create_out(sizes, strides, options);
8831 if (!names.empty()) {
8832 namedinference::propagate_names(*outputs_[output_idx], names);
8833 }
8834 // super must happen after, so that downstream can use maybe_get_output
8835 // to retrieve the output
8836 at::meta::structured_threshold_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
8837 }
8838 const Tensor& maybe_get_output(int64_t output_idx) override {
8839 return *outputs_[output_idx];
8840 }
8841 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8842 c10::OptionalDeviceGuard guard_;
8843};
8844at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
8845structured_threshold_backward_default_backend_functional op;
8846op.meta(grad_output, self, threshold);
8847at::threshold_backward_outf(grad_output, self, threshold, *op.outputs_[0]);
8848return std::move(op.outputs_[0]).take();
8849}
8850namespace {
8851at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___nested_view_from_buffer_copy(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
8852 // No device check
8853 // DeviceGuard omitted
8854 return at::native::_nested_view_from_buffer_copy(self, nested_size, nested_strides, offsets);
8855}
8856} // anonymous namespace
8857namespace {
8858at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___trilinear(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
8859 // No device check
8860 // DeviceGuard omitted
8861 return at::native::_trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
8862}
8863} // anonymous namespace
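// NOTE [plain fallback wrappers]
// Ops such as _nested_view_from_buffer_copy and _trilinear above are not
// structured; their wrappers simply forward to the at::native composite
// implementation, with no device check and no device guard (as the generated
// comments state). They sit in an anonymous namespace for internal linkage,
// since they are only referenced by the dispatcher registration block that
// torchgen emits further down in this file (not shown here).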
8864struct structured_trunc_default_backend_functional final : public at::meta::structured_trunc {
8865 void set_output_strided(
8866 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8867 TensorOptions options, DimnameList names
8868 ) override {
8869 auto current_device = guard_.current_device();
8870 if (C10_UNLIKELY(current_device.has_value())) {
8871 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8872 "structured kernels don't support multi-device outputs");
8873 } else {
8874 guard_.reset_device(options.device());
8875 }
8876 outputs_[output_idx] = create_out(sizes, strides, options);
8877 if (!names.empty()) {
8878 namedinference::propagate_names(*outputs_[output_idx], names);
8879 }
8880 // super must happen after, so that downstream can use maybe_get_output
8881 // to retrieve the output
8882 at::meta::structured_trunc::set_output_raw_strided(output_idx, sizes, strides, options, names);
8883 }
8884 void set_output_raw_strided(
8885 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8886 TensorOptions options, DimnameList names
8887 ) override {
8888 auto current_device = guard_.current_device();
8889 if (C10_UNLIKELY(current_device.has_value())) {
8890 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8891 "structured kernels don't support multi-device outputs");
8892 } else {
8893 guard_.reset_device(options.device());
8894 }
8895 outputs_[output_idx] = create_out(sizes, strides, options);
8896 if (!names.empty()) {
8897 namedinference::propagate_names(*outputs_[output_idx], names);
8898 }
8899 // super must happen after, so that downstream can use maybe_get_output
8900 // to retrieve the output
8901 at::meta::structured_trunc::set_output_raw_strided(output_idx, sizes, strides, options, names);
8902 }
8903 const Tensor& maybe_get_output(int64_t output_idx) override {
8904 return *outputs_[output_idx];
8905 }
8906 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8907 c10::OptionalDeviceGuard guard_;
8908};
8909at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_trunc(const at::Tensor & self) {
8910structured_trunc_default_backend_functional op;
8911op.meta(self);
8912at::trunc_outf(self, *op.outputs_[0]);
8913return std::move(op.outputs_[0]).take();
8914}
8915struct structured_trunc_default_backend_inplace final : public at::meta::structured_trunc {
8916 structured_trunc_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8917 void set_output_strided(
8918 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8919 TensorOptions options, DimnameList names
8920 ) override {
8921 auto current_device = guard_.current_device();
8922 if (C10_UNLIKELY(current_device.has_value())) {
8923 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8924 "structured kernels don't support multi-device outputs");
8925 } else {
8926 guard_.reset_device(options.device());
8927 }
8928 const auto& out = outputs_[output_idx].get();
8929 check_inplace(out, sizes, options);
8930 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8931 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8932 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8933 }
8934 if (!names.empty()) {
8935 namedinference::propagate_names(outputs_[output_idx], names);
8936 }
8937 // super must happen after, so that downstream can use maybe_get_output
8938 // to retrieve the output
8939 at::meta::structured_trunc::set_output_raw_strided(output_idx, sizes, strides, options, names);
8940 }
8941 void set_output_raw_strided(
8942 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8943 TensorOptions options, DimnameList names
8944 ) override {
8945 auto current_device = guard_.current_device();
8946 if (C10_UNLIKELY(current_device.has_value())) {
8947 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8948 "structured kernels don't support multi-device outputs");
8949 } else {
8950 guard_.reset_device(options.device());
8951 }
8952 const auto& out = outputs_[output_idx].get();
8953 check_inplace(out, sizes, options);
8954 if (!names.empty()) {
8955 namedinference::propagate_names(outputs_[output_idx], names);
8956 }
8957 // super must happen after, so that downstream can use maybe_get_output
8958 // to retrieve the output
8959 at::meta::structured_trunc::set_output_raw_strided(output_idx, sizes, strides, options, names);
8960 }
8961 const Tensor& maybe_get_output(int64_t output_idx) override {
8962 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8963 }
8964 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8965 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8966 c10::OptionalDeviceGuard guard_;
8967};
8968at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_trunc_(at::Tensor & self) {
8969structured_trunc_default_backend_inplace op(self);
8970op.meta(self);
8971at::trunc_outf(self, op.outputs_[0]);
8972if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8973return self;
8974}
8975struct structured_norm_ScalarOpt_dim_dtype_default_backend_functional final : public at::meta::structured_norm_ScalarOpt_dim_dtype {
8976 void set_output_strided(
8977 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8978 TensorOptions options, DimnameList names
8979 ) override {
8980 auto current_device = guard_.current_device();
8981 if (C10_UNLIKELY(current_device.has_value())) {
8982 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
8983 "structured kernels don't support multi-device outputs");
8984 } else {
8985 guard_.reset_device(options.device());
8986 }
8987 outputs_[output_idx] = create_out(sizes, strides, options);
8988 if (!names.empty()) {
8989 namedinference::propagate_names(*outputs_[output_idx], names);
8990 }
8991 // super must happen after, so that downstream can use maybe_get_output
8992 // to retrieve the output
8993 }
8994 void set_output_raw_strided(
8995 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8996 TensorOptions options, DimnameList names
8997 ) override {
8998 auto current_device = guard_.current_device();
8999 if (C10_UNLIKELY(current_device.has_value())) {
9000 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9001 "structured kernels don't support multi-device outputs");
9002 } else {
9003 guard_.reset_device(options.device());
9004 }
9005 outputs_[output_idx] = create_out(sizes, strides, options);
9006 if (!names.empty()) {
9007 namedinference::propagate_names(*outputs_[output_idx], names);
9008 }
9009 // super must happen after, so that downstream can use maybe_get_output
9010 // to retrieve the output
9011 }
9012 const Tensor& maybe_get_output(int64_t output_idx) override {
9013 return *outputs_[output_idx];
9014 }
9015 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9016 c10::OptionalDeviceGuard guard_;
9017};
9018at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_norm_ScalarOpt_dim_dtype(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
9019structured_norm_ScalarOpt_dim_dtype_default_backend_functional op;
9020op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype);
9021at::norm_outf(self, p, dim, keepdim, dtype, *op.outputs_[0]);
9022return std::move(op.outputs_[0]).take();
9023}
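// NOTE [optional Scalar arguments]
// For overloads taking c10::optional<at::Scalar> (the norm.ScalarOpt_* ops
// here), the wrapper converts the optional into an at::OptionalScalarRef
// before calling op.meta(), matching how the structured meta function declares
// its optional Scalar parameter; the original c10::optional<at::Scalar> is
// still passed unchanged to the at::norm_outf redispatch.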
9024struct structured_norm_ScalarOpt_dim_default_backend_functional final : public at::meta::structured_norm_ScalarOpt_dim {
9025 void set_output_strided(
9026 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9027 TensorOptions options, DimnameList names
9028 ) override {
9029 auto current_device = guard_.current_device();
9030 if (C10_UNLIKELY(current_device.has_value())) {
9031 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9032 "structured kernels don't support multi-device outputs");
9033 } else {
9034 guard_.reset_device(options.device());
9035 }
9036 outputs_[output_idx] = create_out(sizes, strides, options);
9037 if (!names.empty()) {
9038 namedinference::propagate_names(*outputs_[output_idx], names);
9039 }
9040 // super must happen after, so that downstream can use maybe_get_output
9041 // to retrieve the output
9042 }
9043 void set_output_raw_strided(
9044 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9045 TensorOptions options, DimnameList names
9046 ) override {
9047 auto current_device = guard_.current_device();
9048 if (C10_UNLIKELY(current_device.has_value())) {
9049 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9050 "structured kernels don't support multi-device outputs");
9051 } else {
9052 guard_.reset_device(options.device());
9053 }
9054 outputs_[output_idx] = create_out(sizes, strides, options);
9055 if (!names.empty()) {
9056 namedinference::propagate_names(*outputs_[output_idx], names);
9057 }
9058 // super must happen after, so that downstream can use maybe_get_output
9059 // to retrieve the output
9060 }
9061 const Tensor& maybe_get_output(int64_t output_idx) override {
9062 return *outputs_[output_idx];
9063 }
9064 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9065 c10::OptionalDeviceGuard guard_;
9066};
9067at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_norm_ScalarOpt_dim(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
9068structured_norm_ScalarOpt_dim_default_backend_functional op;
9069op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim);
9070at::norm_outf(self, p, dim, keepdim, *op.outputs_[0]);
9071return std::move(op.outputs_[0]).take();
9072}
9073struct structured_sub_Tensor_default_backend_functional final : public at::meta::structured_sub_Tensor {
9074 void set_output_strided(
9075 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9076 TensorOptions options, DimnameList names
9077 ) override {
9078 auto current_device = guard_.current_device();
9079 if (C10_UNLIKELY(current_device.has_value())) {
9080 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9081 "structured kernels don't support multi-device outputs");
9082 } else {
9083 guard_.reset_device(options.device());
9084 }
9085 outputs_[output_idx] = create_out(sizes, strides, options);
9086 if (!names.empty()) {
9087 namedinference::propagate_names(*outputs_[output_idx], names);
9088 }
9089 // super must happen after, so that downstream can use maybe_get_output
9090 // to retrieve the output
9091 at::meta::structured_sub_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
9092 }
9093 void set_output_raw_strided(
9094 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9095 TensorOptions options, DimnameList names
9096 ) override {
9097 auto current_device = guard_.current_device();
9098 if (C10_UNLIKELY(current_device.has_value())) {
9099 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9100 "structured kernels don't support multi-device outputs");
9101 } else {
9102 guard_.reset_device(options.device());
9103 }
9104 outputs_[output_idx] = create_out(sizes, strides, options);
9105 if (!names.empty()) {
9106 namedinference::propagate_names(*outputs_[output_idx], names);
9107 }
9108 // super must happen after, so that downstream can use maybe_get_output
9109 // to retrieve the output
9110 at::meta::structured_sub_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
9111 }
9112 const Tensor& maybe_get_output(int64_t output_idx) override {
9113 return *outputs_[output_idx];
9114 }
9115 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9116 c10::OptionalDeviceGuard guard_;
9117};
9118at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sub_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
9119structured_sub_Tensor_default_backend_functional op;
9120op.meta(self, other, alpha);
9121at::sub_outf(self, other, alpha, *op.outputs_[0]);
9122return std::move(op.outputs_[0]).take();
9123}
9124struct structured_sub_Tensor_default_backend_inplace final : public at::meta::structured_sub_Tensor {
9125 structured_sub_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
9126 void set_output_strided(
9127 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9128 TensorOptions options, DimnameList names
9129 ) override {
9130 auto current_device = guard_.current_device();
9131 if (C10_UNLIKELY(current_device.has_value())) {
9132 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9133 "structured kernels don't support multi-device outputs");
9134 } else {
9135 guard_.reset_device(options.device());
9136 }
9137 const auto& out = outputs_[output_idx].get();
9138 check_inplace(out, sizes, options);
9139 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9140 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9141 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9142 }
9143 if (!names.empty()) {
9144 namedinference::propagate_names(outputs_[output_idx], names);
9145 }
9146 // super must happen after, so that downstream can use maybe_get_output
9147 // to retrieve the output
9148 at::meta::structured_sub_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
9149 }
9150 void set_output_raw_strided(
9151 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9152 TensorOptions options, DimnameList names
9153 ) override {
9154 auto current_device = guard_.current_device();
9155 if (C10_UNLIKELY(current_device.has_value())) {
9156 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9157 "structured kernels don't support multi-device outputs");
9158 } else {
9159 guard_.reset_device(options.device());
9160 }
9161 const auto& out = outputs_[output_idx].get();
9162 check_inplace(out, sizes, options);
9163 if (!names.empty()) {
9164 namedinference::propagate_names(outputs_[output_idx], names);
9165 }
9166 // super must happen after, so that downstream can use maybe_get_output
9167 // to retrieve the output
9168 at::meta::structured_sub_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
9169 }
9170 const Tensor& maybe_get_output(int64_t output_idx) override {
9171 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9172 }
9173 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9174 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9175 c10::OptionalDeviceGuard guard_;
9176};
9177at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_sub__Tensor(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
9178structured_sub_Tensor_default_backend_inplace op(self);
9179op.meta(self, other, alpha);
9180at::sub_outf(self, other, alpha, op.outputs_[0]);
9181if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9182return self;
9183}
9184struct structured_heaviside_default_backend_functional final : public at::meta::structured_heaviside {
9185 void set_output_strided(
9186 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9187 TensorOptions options, DimnameList names
9188 ) override {
9189 auto current_device = guard_.current_device();
9190 if (C10_UNLIKELY(current_device.has_value())) {
9191 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9192 "structured kernels don't support multi-device outputs");
9193 } else {
9194 guard_.reset_device(options.device());
9195 }
9196 outputs_[output_idx] = create_out(sizes, strides, options);
9197 if (!names.empty()) {
9198 namedinference::propagate_names(*outputs_[output_idx], names);
9199 }
9200 // super must happen after, so that downstream can use maybe_get_output
9201 // to retrieve the output
9202 at::meta::structured_heaviside::set_output_raw_strided(output_idx, sizes, strides, options, names);
9203 }
9204 void set_output_raw_strided(
9205 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9206 TensorOptions options, DimnameList names
9207 ) override {
9208 auto current_device = guard_.current_device();
9209 if (C10_UNLIKELY(current_device.has_value())) {
9210 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9211 "structured kernels don't support multi-device outputs");
9212 } else {
9213 guard_.reset_device(options.device());
9214 }
9215 outputs_[output_idx] = create_out(sizes, strides, options);
9216 if (!names.empty()) {
9217 namedinference::propagate_names(*outputs_[output_idx], names);
9218 }
9219 // super must happen after, so that downstream can use maybe_get_output
9220 // to retrieve the output
9221 at::meta::structured_heaviside::set_output_raw_strided(output_idx, sizes, strides, options, names);
9222 }
9223 const Tensor& maybe_get_output(int64_t output_idx) override {
9224 return *outputs_[output_idx];
9225 }
9226 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9227 c10::OptionalDeviceGuard guard_;
9228};
9229at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_heaviside(const at::Tensor & self, const at::Tensor & values) {
9230structured_heaviside_default_backend_functional op;
9231op.meta(self, values);
9232at::heaviside_outf(self, values, *op.outputs_[0]);
9233return std::move(op.outputs_[0]).take();
9234}
9235struct structured_heaviside_default_backend_inplace final : public at::meta::structured_heaviside {
9236 structured_heaviside_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
9237 void set_output_strided(
9238 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9239 TensorOptions options, DimnameList names
9240 ) override {
9241 auto current_device = guard_.current_device();
9242 if (C10_UNLIKELY(current_device.has_value())) {
9243 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9244 "structured kernels don't support multi-device outputs");
9245 } else {
9246 guard_.reset_device(options.device());
9247 }
9248 const auto& out = outputs_[output_idx].get();
9249 check_inplace(out, sizes, options);
9250 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9251 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9252 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9253 }
9254 if (!names.empty()) {
9255 namedinference::propagate_names(outputs_[output_idx], names);
9256 }
9257 // super must happen after, so that downstream can use maybe_get_output
9258 // to retrieve the output
9259 at::meta::structured_heaviside::set_output_raw_strided(output_idx, sizes, strides, options, names);
9260 }
9261 void set_output_raw_strided(
9262 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9263 TensorOptions options, DimnameList names
9264 ) override {
9265 auto current_device = guard_.current_device();
9266 if (C10_UNLIKELY(current_device.has_value())) {
9267 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9268 "structured kernels don't support multi-device outputs");
9269 } else {
9270 guard_.reset_device(options.device());
9271 }
9272 const auto& out = outputs_[output_idx].get();
9273 check_inplace(out, sizes, options);
9274 if (!names.empty()) {
9275 namedinference::propagate_names(outputs_[output_idx], names);
9276 }
9277 // super must happen after, so that downstream can use maybe_get_output
9278 // to retrieve the output
9279 at::meta::structured_heaviside::set_output_raw_strided(output_idx, sizes, strides, options, names);
9280 }
9281 const Tensor& maybe_get_output(int64_t output_idx) override {
9282 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9283 }
9284 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9285 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9286 c10::OptionalDeviceGuard guard_;
9287};
9288at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_heaviside_(at::Tensor & self, const at::Tensor & values) {
9289structured_heaviside_default_backend_inplace op(self);
9290op.meta(self, values);
9291at::heaviside_outf(self, values, op.outputs_[0]);
9292if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9293return self;
9294}
9295struct structured_addmm_default_backend_functional final : public at::meta::structured_addmm {
9296 void set_output_strided(
9297 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9298 TensorOptions options, DimnameList names
9299 ) override {
9300 auto current_device = guard_.current_device();
9301 if (C10_UNLIKELY(current_device.has_value())) {
9302 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9303 "structured kernels don't support multi-device outputs");
9304 } else {
9305 guard_.reset_device(options.device());
9306 }
9307 outputs_[output_idx] = create_out(sizes, strides, options);
9308 if (!names.empty()) {
9309 namedinference::propagate_names(*outputs_[output_idx], names);
9310 }
9311 // super must happen after, so that downstream can use maybe_get_output
9312 // to retrieve the output
9313 }
9314 void set_output_raw_strided(
9315 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9316 TensorOptions options, DimnameList names
9317 ) override {
9318 auto current_device = guard_.current_device();
9319 if (C10_UNLIKELY(current_device.has_value())) {
9320 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9321 "structured kernels don't support multi-device outputs");
9322 } else {
9323 guard_.reset_device(options.device());
9324 }
9325 outputs_[output_idx] = create_out(sizes, strides, options);
9326 if (!names.empty()) {
9327 namedinference::propagate_names(*outputs_[output_idx], names);
9328 }
9329 // super must happen after, so that downstream can use maybe_get_output
9330 // to retrieve the output
9331 }
9332 const Tensor& maybe_get_output(int64_t output_idx) override {
9333 return *outputs_[output_idx];
9334 }
9335 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9336 c10::OptionalDeviceGuard guard_;
9337};
9338at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
9339structured_addmm_default_backend_functional op;
9340op.meta(self, mat1, mat2, beta, alpha);
9341at::addmm_outf(self, mat1, mat2, beta, alpha, *op.outputs_[0]);
9342return std::move(op.outputs_[0]).take();
9343}
9344struct structured_addmm_default_backend_inplace final : public at::meta::structured_addmm {
9345 structured_addmm_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
9346 void set_output_strided(
9347 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9348 TensorOptions options, DimnameList names
9349 ) override {
9350 auto current_device = guard_.current_device();
9351 if (C10_UNLIKELY(current_device.has_value())) {
9352 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9353 "structured kernels don't support multi-device outputs");
9354 } else {
9355 guard_.reset_device(options.device());
9356 }
9357 const auto& out = outputs_[output_idx].get();
9358 check_inplace(out, sizes, options);
9359 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9360 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9361 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9362 }
9363 if (!names.empty()) {
9364 namedinference::propagate_names(outputs_[output_idx], names);
9365 }
9366 // super must happen after, so that downstream can use maybe_get_output
9367 // to retrieve the output
9368 }
9369 void set_output_raw_strided(
9370 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9371 TensorOptions options, DimnameList names
9372 ) override {
9373 auto current_device = guard_.current_device();
9374 if (C10_UNLIKELY(current_device.has_value())) {
9375 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9376 "structured kernels don't support multi-device outputs");
9377 } else {
9378 guard_.reset_device(options.device());
9379 }
9380 const auto& out = outputs_[output_idx].get();
9381 check_inplace(out, sizes, options);
9382 if (!names.empty()) {
9383 namedinference::propagate_names(outputs_[output_idx], names);
9384 }
9385 // super must happen after, so that downstream can use maybe_get_output
9386 // to retrieve the output
9387 }
9388 const Tensor& maybe_get_output(int64_t output_idx) override {
9389 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9390 }
9391 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9392 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9393 c10::OptionalDeviceGuard guard_;
9394};
9395at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
9396structured_addmm_default_backend_inplace op(self);
9397op.meta(self, mat1, mat2, beta, alpha);
9398at::addmm_outf(self, mat1, mat2, beta, alpha, op.outputs_[0]);
9399if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9400return self;
9401}
9402struct structured__addmm_activation_default_backend_functional final : public at::meta::structured__addmm_activation {
9403 void set_output_strided(
9404 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9405 TensorOptions options, DimnameList names
9406 ) override {
9407 auto current_device = guard_.current_device();
9408 if (C10_UNLIKELY(current_device.has_value())) {
9409 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9410 "structured kernels don't support multi-device outputs");
9411 } else {
9412 guard_.reset_device(options.device());
9413 }
9414 outputs_[output_idx] = create_out(sizes, strides, options);
9415 if (!names.empty()) {
9416 namedinference::propagate_names(*outputs_[output_idx], names);
9417 }
9418 // super must happen after, so that downstream can use maybe_get_output
9419 // to retrieve the output
9420 }
9421 void set_output_raw_strided(
9422 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9423 TensorOptions options, DimnameList names
9424 ) override {
9425 auto current_device = guard_.current_device();
9426 if (C10_UNLIKELY(current_device.has_value())) {
9427 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9428 "structured kernels don't support multi-device outputs");
9429 } else {
9430 guard_.reset_device(options.device());
9431 }
9432 outputs_[output_idx] = create_out(sizes, strides, options);
9433 if (!names.empty()) {
9434 namedinference::propagate_names(*outputs_[output_idx], names);
9435 }
9436 // super must happen after, so that downstream can use maybe_get_output
9437 // to retrieve the output
9438 }
9439 const Tensor& maybe_get_output(int64_t output_idx) override {
9440 return *outputs_[output_idx];
9441 }
9442 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9443 c10::OptionalDeviceGuard guard_;
9444};
9445at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
9446structured__addmm_activation_default_backend_functional op;
9447op.meta(self, mat1, mat2, beta, alpha, use_gelu);
9448at::_addmm_activation_outf(self, mat1, mat2, beta, alpha, use_gelu, *op.outputs_[0]);
9449return std::move(op.outputs_[0]).take();
9450}
9451namespace {
9452at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__lift_fresh_copy(const at::Tensor & self) {
9453 // No device check
9454 // DeviceGuard omitted
9455 return at::native::lift_fresh_copy(self);
9456}
9457} // anonymous namespace
9458struct structured_index_add_default_backend_functional final : public at::meta::structured_index_add {
9459 void set_output_strided(
9460 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9461 TensorOptions options, DimnameList names
9462 ) override {
9463 auto current_device = guard_.current_device();
9464 if (C10_UNLIKELY(current_device.has_value())) {
9465 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9466 "structured kernels don't support multi-device outputs");
9467 } else {
9468 guard_.reset_device(options.device());
9469 }
9470 outputs_[output_idx] = create_out(sizes, strides, options);
9471 if (!names.empty()) {
9472 namedinference::propagate_names(*outputs_[output_idx], names);
9473 }
9474 // super must happen after, so that downstream can use maybe_get_output
9475 // to retrieve the output
9476 }
9477 void set_output_raw_strided(
9478 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9479 TensorOptions options, DimnameList names
9480 ) override {
9481 auto current_device = guard_.current_device();
9482 if (C10_UNLIKELY(current_device.has_value())) {
9483 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9484 "structured kernels don't support multi-device outputs");
9485 } else {
9486 guard_.reset_device(options.device());
9487 }
9488 outputs_[output_idx] = create_out(sizes, strides, options);
9489 if (!names.empty()) {
9490 namedinference::propagate_names(*outputs_[output_idx], names);
9491 }
9492 // super must happen after, so that downstream can use maybe_get_output
9493 // to retrieve the output
9494 }
9495 const Tensor& maybe_get_output(int64_t output_idx) override {
9496 return *outputs_[output_idx];
9497 }
9498 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9499 c10::OptionalDeviceGuard guard_;
9500};
9501at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
9502structured_index_add_default_backend_functional op;
9503auto precompute = op.meta(self, dim, index, source, alpha);
9504(void)precompute;
9505at::index_add_outf(self, precompute.dim, index, source, alpha, *op.outputs_[0]);
9506return std::move(op.outputs_[0]).take();
9507}
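// NOTE [precomputed meta arguments]
// index_add (and index_reduce below) use a precompute-style meta function:
// op.meta() returns a struct of values computed during shape checking, and the
// wrapper forwards precompute.dim (the dimension after wrapping/validation) to
// the out= call instead of the raw `dim` argument. The `(void)precompute;`
// cast appears to be emitted unconditionally so the build stays warning-free
// even for ops where no precomputed field ends up being consumed.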
9508struct structured_index_add_default_backend_inplace final : public at::meta::structured_index_add {
9509 structured_index_add_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
9510 void set_output_strided(
9511 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9512 TensorOptions options, DimnameList names
9513 ) override {
9514 auto current_device = guard_.current_device();
9515 if (C10_UNLIKELY(current_device.has_value())) {
9516 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9517 "structured kernels don't support multi-device outputs");
9518 } else {
9519 guard_.reset_device(options.device());
9520 }
9521 const auto& out = outputs_[output_idx].get();
9522 check_inplace(out, sizes, options);
9523 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9524 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9525 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9526 }
9527 if (!names.empty()) {
9528 namedinference::propagate_names(outputs_[output_idx], names);
9529 }
9530 // super must happen after, so that downstream can use maybe_get_output
9531 // to retrieve the output
9532 }
9533 void set_output_raw_strided(
9534 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9535 TensorOptions options, DimnameList names
9536 ) override {
9537 auto current_device = guard_.current_device();
9538 if (C10_UNLIKELY(current_device.has_value())) {
9539 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
9540 "structured kernels don't support multi-device outputs");
9541 } else {
9542 guard_.reset_device(options.device());
9543 }
9544 const auto& out = outputs_[output_idx].get();
9545 check_inplace(out, sizes, options);
9546 if (!names.empty()) {
9547 namedinference::propagate_names(outputs_[output_idx], names);
9548 }
9549 // super must happen after, so that downstream can use maybe_get_output
9550 // to retrieve the output
9551 }
9552 const Tensor& maybe_get_output(int64_t output_idx) override {
9553 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9554 }
9555 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9556 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9557 c10::OptionalDeviceGuard guard_;
9558};
9559at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
9560structured_index_add_default_backend_inplace op(self);
9561auto precompute = op.meta(self, dim, index, source, alpha);
9562(void)precompute;
9563at::index_add_outf(self, precompute.dim, index, source, alpha, op.outputs_[0]);
9564if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9565return self;
9566}
struct structured_index_reduce_default_backend_functional final : public at::meta::structured_index_reduce {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
structured_index_reduce_default_backend_functional op;
auto precompute = op.meta(self, dim, index, source, reduce, include_self);
(void)precompute;
at::index_reduce_outf(self, precompute.dim, index, source, reduce, include_self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_index_reduce_default_backend_inplace final : public at::meta::structured_index_reduce {
  structured_index_reduce_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
structured_index_reduce_default_backend_inplace op(self);
auto precompute = op.meta(self, dim, index, source, reduce, include_self);
(void)precompute;
at::index_reduce_outf(self, precompute.dim, index, source, reduce, include_self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_scatter_src_default_backend_functional final : public at::meta::structured_scatter_src {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_scatter_src(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
structured_scatter_src_default_backend_functional op;
op.meta(self, dim, index, src);
at::scatter_outf(self, dim, index, src, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_scatter_src_default_backend_inplace final : public at::meta::structured_scatter_src {
  structured_scatter_src_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_scatter__src(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
structured_scatter_src_default_backend_inplace op(self);
op.meta(self, dim, index, src);
at::scatter_outf(self, dim, index, src, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_scatter_value_default_backend_functional final : public at::meta::structured_scatter_value {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_scatter_value(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
structured_scatter_value_default_backend_functional op;
op.meta(self, dim, index, value);
at::scatter_outf(self, dim, index, value, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_scatter_value_default_backend_inplace final : public at::meta::structured_scatter_value {
  structured_scatter_value_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_scatter__value(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
structured_scatter_value_default_backend_inplace op(self);
op.meta(self, dim, index, value);
at::scatter_outf(self, dim, index, value, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_scatter_reduce_default_backend_functional final : public at::meta::structured_scatter_reduce {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
structured_scatter_reduce_default_backend_functional op;
op.meta(self, dim, index, src, reduce);
at::scatter_outf(self, dim, index, src, reduce, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_scatter_reduce_default_backend_inplace final : public at::meta::structured_scatter_reduce {
  structured_scatter_reduce_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_scatter__reduce(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
structured_scatter_reduce_default_backend_inplace op(self);
op.meta(self, dim, index, src, reduce);
at::scatter_outf(self, dim, index, src, reduce, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_scatter_value_reduce_default_backend_functional final : public at::meta::structured_scatter_value_reduce {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_scatter_value_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
structured_scatter_value_reduce_default_backend_functional op;
op.meta(self, dim, index, value, reduce);
at::scatter_outf(self, dim, index, value, reduce, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_scatter_value_reduce_default_backend_inplace final : public at::meta::structured_scatter_value_reduce {
  structured_scatter_value_reduce_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_scatter__value_reduce(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
structured_scatter_value_reduce_default_backend_inplace op(self);
op.meta(self, dim, index, value, reduce);
at::scatter_outf(self, dim, index, value, reduce, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_scatter_add_default_backend_functional final : public at::meta::structured_scatter_add {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
structured_scatter_add_default_backend_functional op;
op.meta(self, dim, index, src);
at::scatter_add_outf(self, dim, index, src, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
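// For example (illustrative only), a call such as at::scatter_add(self, 0, index, src)
// that lands on this dispatch key runs through the wrapper above: op.meta() performs
// the shared shape/dtype checks, at::scatter_add_outf() redispatches to the backend's
// out-variant kernel, and the freshly allocated result is returned by value.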
struct structured_scatter_add_default_backend_inplace final : public at::meta::structured_scatter_add {
  structured_scatter_add_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
structured_scatter_add_default_backend_inplace op(self);
op.meta(self, dim, index, src);
at::scatter_add_outf(self, dim, index, src, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_scatter_reduce_two_default_backend_functional final : public at::meta::structured_scatter_reduce_two {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce_two(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
structured_scatter_reduce_two_default_backend_functional op;
op.meta(self, dim, index, src, reduce, include_self);
at::scatter_reduce_outf(self, dim, index, src, reduce, include_self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_scatter_reduce_two_default_backend_inplace final : public at::meta::structured_scatter_reduce_two {
  structured_scatter_reduce_two_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce__two(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
structured_scatter_reduce_two_default_backend_inplace op(self);
op.meta(self, dim, index, src, reduce, include_self);
at::scatter_reduce_outf(self, dim, index, src, reduce, include_self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
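// NOTE: The comparison and bitwise ops that follow (eq, bitwise_and, bitwise_or,
// bitwise_xor, bitwise_left_shift, ...) differ from the scatter/index wrappers above
// in one detail: their set_output_strided()/set_output_raw_strided() overrides also
// forward to the meta class, e.g.
//
//   at::meta::structured_eq_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
//
// so that the iterator-based meta implementation sees the output tensor it is
// expected to write into.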
struct structured_eq_Scalar_default_backend_functional final : public at::meta::structured_eq_Scalar {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_eq_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_eq_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_eq_Scalar(const at::Tensor & self, const at::Scalar & other) {
structured_eq_Scalar_default_backend_functional op;
op.meta(self, other);
at::eq_outf(self, other, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_eq_Scalar_default_backend_inplace final : public at::meta::structured_eq_Scalar {
  structured_eq_Scalar_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_eq_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_eq_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_eq__Scalar(at::Tensor & self, const at::Scalar & other) {
structured_eq_Scalar_default_backend_inplace op(self);
op.meta(self, other);
at::eq_outf(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_eq_Tensor_default_backend_functional final : public at::meta::structured_eq_Tensor {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_eq_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_eq_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_eq_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_eq_Tensor_default_backend_functional op;
op.meta(self, other);
at::eq_outf(self, other, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_eq_Tensor_default_backend_inplace final : public at::meta::structured_eq_Tensor {
  structured_eq_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_eq_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_eq_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_eq__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_eq_Tensor_default_backend_inplace op(self);
op.meta(self, other);
at::eq_outf(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_bitwise_and_Tensor_default_backend_functional final : public at::meta::structured_bitwise_and_Tensor {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_and_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_and_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_bitwise_and_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_bitwise_and_Tensor_default_backend_functional op;
op.meta(self, other);
at::bitwise_and_outf(self, other, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_bitwise_and_Tensor_default_backend_inplace final : public at::meta::structured_bitwise_and_Tensor {
  structured_bitwise_and_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_and_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_and_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_bitwise_and__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_bitwise_and_Tensor_default_backend_inplace op(self);
op.meta(self, other);
at::bitwise_and_outf(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_bitwise_or_Tensor_default_backend_functional final : public at::meta::structured_bitwise_or_Tensor {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_or_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_or_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_bitwise_or_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_bitwise_or_Tensor_default_backend_functional op;
op.meta(self, other);
at::bitwise_or_outf(self, other, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_bitwise_or_Tensor_default_backend_inplace final : public at::meta::structured_bitwise_or_Tensor {
  structured_bitwise_or_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_or_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_or_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_bitwise_or__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_bitwise_or_Tensor_default_backend_inplace op(self);
op.meta(self, other);
at::bitwise_or_outf(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_bitwise_xor_Tensor_default_backend_functional final : public at::meta::structured_bitwise_xor_Tensor {
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_xor_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    outputs_[output_idx] = create_out(sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(*outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_xor_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return *outputs_[output_idx];
  }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_bitwise_xor_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_bitwise_xor_Tensor_default_backend_functional op;
op.meta(self, other);
at::bitwise_xor_outf(self, other, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_bitwise_xor_Tensor_default_backend_inplace final : public at::meta::structured_bitwise_xor_Tensor {
  structured_bitwise_xor_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_xor_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    auto current_device = guard_.current_device();
    if (C10_UNLIKELY(current_device.has_value())) {
      TORCH_INTERNAL_ASSERT(*current_device == options.device(),
          "structured kernels don't support multi-device outputs");
    } else {
      guard_.reset_device(options.device());
    }
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::meta::structured_bitwise_xor_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
  c10::OptionalDeviceGuard guard_;
};
at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_bitwise_xor__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_bitwise_xor_Tensor_default_backend_inplace op(self);
op.meta(self, other);
at::bitwise_xor_outf(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
10873struct structured_bitwise_left_shift_Tensor_default_backend_functional final : public at::meta::structured_bitwise_left_shift_Tensor {
10874 void set_output_strided(
10875 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10876 TensorOptions options, DimnameList names
10877 ) override {
10878 auto current_device = guard_.current_device();
10879 if (C10_UNLIKELY(current_device.has_value())) {
10880 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
10881 "structured kernels don't support multi-device outputs");
10882 } else {
10883 guard_.reset_device(options.device());
10884 }
10885 outputs_[output_idx] = create_out(sizes, strides, options);
10886 if (!names.empty()) {
10887 namedinference::propagate_names(*outputs_[output_idx], names);
10888 }
10889 // super must happen after, so that downstream can use maybe_get_output
10890 // to retrieve the output
10891 at::meta::structured_bitwise_left_shift_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
10892 }
10893 void set_output_raw_strided(
10894 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10895 TensorOptions options, DimnameList names
10896 ) override {
10897 auto current_device = guard_.current_device();
10898 if (C10_UNLIKELY(current_device.has_value())) {
10899 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
10900 "structured kernels don't support multi-device outputs");
10901 } else {
10902 guard_.reset_device(options.device());
10903 }
10904 outputs_[output_idx] = create_out(sizes, strides, options);
10905 if (!names.empty()) {
10906 namedinference::propagate_names(*outputs_[output_idx], names);
10907 }
10908 // super must happen after, so that downstream can use maybe_get_output
10909 // to retrieve the output
10910 at::meta::structured_bitwise_left_shift_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
10911 }
10912 const Tensor& maybe_get_output(int64_t output_idx) override {
10913 return *outputs_[output_idx];
10914 }
10915 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
10916 c10::OptionalDeviceGuard guard_;
10917};
10918at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_bitwise_left_shift_Tensor(const at::Tensor & self, const at::Tensor & other) {
10919structured_bitwise_left_shift_Tensor_default_backend_functional op;
10920op.meta(self, other);
10921at::bitwise_left_shift_outf(self, other, *op.outputs_[0]);
10922return std::move(op.outputs_[0]).take();
10923}
10924struct structured_bitwise_left_shift_Tensor_default_backend_inplace final : public at::meta::structured_bitwise_left_shift_Tensor {
10925 structured_bitwise_left_shift_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
10926 void set_output_strided(
10927 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10928 TensorOptions options, DimnameList names
10929 ) override {
10930 auto current_device = guard_.current_device();
10931 if (C10_UNLIKELY(current_device.has_value())) {
10932 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
10933 "structured kernels don't support multi-device outputs");
10934 } else {
10935 guard_.reset_device(options.device());
10936 }
10937 const auto& out = outputs_[output_idx].get();
10938 check_inplace(out, sizes, options);
10939 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10940 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10941 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10942 }
10943 if (!names.empty()) {
10944 namedinference::propagate_names(outputs_[output_idx], names);
10945 }
10946 // super must happen after, so that downstream can use maybe_get_output
10947 // to retrieve the output
10948 at::meta::structured_bitwise_left_shift_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
10949 }
10950 void set_output_raw_strided(
10951 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10952 TensorOptions options, DimnameList names
10953 ) override {
10954 auto current_device = guard_.current_device();
10955 if (C10_UNLIKELY(current_device.has_value())) {
10956 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
10957 "structured kernels don't support multi-device outputs");
10958 } else {
10959 guard_.reset_device(options.device());
10960 }
10961 const auto& out = outputs_[output_idx].get();
10962 check_inplace(out, sizes, options);
10963 if (!names.empty()) {
10964 namedinference::propagate_names(outputs_[output_idx], names);
10965 }
10966 // super must happen after, so that downstream can use maybe_get_output
10967 // to retrieve the output
10968 at::meta::structured_bitwise_left_shift_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
10969 }
10970 const Tensor& maybe_get_output(int64_t output_idx) override {
10971 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10972 }
10973 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10974 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10975 c10::OptionalDeviceGuard guard_;
10976};
10977at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_bitwise_left_shift__Tensor(at::Tensor & self, const at::Tensor & other) {
10978structured_bitwise_left_shift_Tensor_default_backend_inplace op(self);
10979op.meta(self, other);
10980at::bitwise_left_shift_outf(self, other, op.outputs_[0]);
10981if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10982return self;
10983}
10984struct structured_bitwise_right_shift_Tensor_default_backend_functional final : public at::meta::structured_bitwise_right_shift_Tensor {
10985 void set_output_strided(
10986 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10987 TensorOptions options, DimnameList names
10988 ) override {
10989 auto current_device = guard_.current_device();
10990 if (C10_UNLIKELY(current_device.has_value())) {
10991 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
10992 "structured kernels don't support multi-device outputs");
10993 } else {
10994 guard_.reset_device(options.device());
10995 }
10996 outputs_[output_idx] = create_out(sizes, strides, options);
10997 if (!names.empty()) {
10998 namedinference::propagate_names(*outputs_[output_idx], names);
10999 }
11000 // super must happen after, so that downstream can use maybe_get_output
11001 // to retrieve the output
11002 at::meta::structured_bitwise_right_shift_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11003 }
11004 void set_output_raw_strided(
11005 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11006 TensorOptions options, DimnameList names
11007 ) override {
11008 auto current_device = guard_.current_device();
11009 if (C10_UNLIKELY(current_device.has_value())) {
11010 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11011 "structured kernels don't support multi-device outputs");
11012 } else {
11013 guard_.reset_device(options.device());
11014 }
11015 outputs_[output_idx] = create_out(sizes, strides, options);
11016 if (!names.empty()) {
11017 namedinference::propagate_names(*outputs_[output_idx], names);
11018 }
11019 // super must happen after, so that downstream can use maybe_get_output
11020 // to retrieve the output
11021 at::meta::structured_bitwise_right_shift_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11022 }
11023 const Tensor& maybe_get_output(int64_t output_idx) override {
11024 return *outputs_[output_idx];
11025 }
11026 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11027 c10::OptionalDeviceGuard guard_;
11028};
11029at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_bitwise_right_shift_Tensor(const at::Tensor & self, const at::Tensor & other) {
11030structured_bitwise_right_shift_Tensor_default_backend_functional op;
11031op.meta(self, other);
11032at::bitwise_right_shift_outf(self, other, *op.outputs_[0]);
11033return std::move(op.outputs_[0]).take();
11034}
11035struct structured_bitwise_right_shift_Tensor_default_backend_inplace final : public at::meta::structured_bitwise_right_shift_Tensor {
11036 structured_bitwise_right_shift_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11037 void set_output_strided(
11038 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11039 TensorOptions options, DimnameList names
11040 ) override {
11041 auto current_device = guard_.current_device();
11042 if (C10_UNLIKELY(current_device.has_value())) {
11043 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11044 "structured kernels don't support multi-device outputs");
11045 } else {
11046 guard_.reset_device(options.device());
11047 }
11048 const auto& out = outputs_[output_idx].get();
11049 check_inplace(out, sizes, options);
11050 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11051 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11052 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11053 }
11054 if (!names.empty()) {
11055 namedinference::propagate_names(outputs_[output_idx], names);
11056 }
11057 // super must happen after, so that downstream can use maybe_get_output
11058 // to retrieve the output
11059 at::meta::structured_bitwise_right_shift_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11060 }
11061 void set_output_raw_strided(
11062 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11063 TensorOptions options, DimnameList names
11064 ) override {
11065 auto current_device = guard_.current_device();
11066 if (C10_UNLIKELY(current_device.has_value())) {
11067 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11068 "structured kernels don't support multi-device outputs");
11069 } else {
11070 guard_.reset_device(options.device());
11071 }
11072 const auto& out = outputs_[output_idx].get();
11073 check_inplace(out, sizes, options);
11074 if (!names.empty()) {
11075 namedinference::propagate_names(outputs_[output_idx], names);
11076 }
11077 // super must happen after, so that downstream can use maybe_get_output
11078 // to retrieve the output
11079 at::meta::structured_bitwise_right_shift_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11080 }
11081 const Tensor& maybe_get_output(int64_t output_idx) override {
11082 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11083 }
11084 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11085 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11086 c10::OptionalDeviceGuard guard_;
11087};
11088at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_bitwise_right_shift__Tensor(at::Tensor & self, const at::Tensor & other) {
11089structured_bitwise_right_shift_Tensor_default_backend_inplace op(self);
11090op.meta(self, other);
11091at::bitwise_right_shift_outf(self, other, op.outputs_[0]);
11092if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11093return self;
11094}
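// NOTE: the tril and triu structs below differ from the ops above in one
// respect: their set_output_strided/set_output_raw_strided overrides do not end
// with a call into at::meta::structured_tril::set_output_raw_strided (resp.
// structured_triu), so the trailing "super must happen after" comment has
// nothing to forward to there; presumably those meta classes carry no
// TensorIterator state that would need configuring.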
11095struct structured_tril_default_backend_functional final : public at::meta::structured_tril {
11096 void set_output_strided(
11097 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11098 TensorOptions options, DimnameList names
11099 ) override {
11100 auto current_device = guard_.current_device();
11101 if (C10_UNLIKELY(current_device.has_value())) {
11102 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11103 "structured kernels don't support multi-device outputs");
11104 } else {
11105 guard_.reset_device(options.device());
11106 }
11107 outputs_[output_idx] = create_out(sizes, strides, options);
11108 if (!names.empty()) {
11109 namedinference::propagate_names(*outputs_[output_idx], names);
11110 }
11111 // super must happen after, so that downstream can use maybe_get_output
11112 // to retrieve the output
11113 }
11114 void set_output_raw_strided(
11115 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11116 TensorOptions options, DimnameList names
11117 ) override {
11118 auto current_device = guard_.current_device();
11119 if (C10_UNLIKELY(current_device.has_value())) {
11120 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11121 "structured kernels don't support multi-device outputs");
11122 } else {
11123 guard_.reset_device(options.device());
11124 }
11125 outputs_[output_idx] = create_out(sizes, strides, options);
11126 if (!names.empty()) {
11127 namedinference::propagate_names(*outputs_[output_idx], names);
11128 }
11129 // super must happen after, so that downstream can use maybe_get_output
11130 // to retrieve the output
11131 }
11132 const Tensor& maybe_get_output(int64_t output_idx) override {
11133 return *outputs_[output_idx];
11134 }
11135 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11136 c10::OptionalDeviceGuard guard_;
11137};
11138at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_tril(const at::Tensor & self, int64_t diagonal) {
11139structured_tril_default_backend_functional op;
11140op.meta(self, diagonal);
11141at::tril_outf(self, diagonal, *op.outputs_[0]);
11142return std::move(op.outputs_[0]).take();
11143}
11144struct structured_tril_default_backend_inplace final : public at::meta::structured_tril {
11145 structured_tril_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11146 void set_output_strided(
11147 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11148 TensorOptions options, DimnameList names
11149 ) override {
11150 auto current_device = guard_.current_device();
11151 if (C10_UNLIKELY(current_device.has_value())) {
11152 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11153 "structured kernels don't support multi-device outputs");
11154 } else {
11155 guard_.reset_device(options.device());
11156 }
11157 const auto& out = outputs_[output_idx].get();
11158 check_inplace(out, sizes, options);
11159 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11160 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11161 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11162 }
11163 if (!names.empty()) {
11164 namedinference::propagate_names(outputs_[output_idx], names);
11165 }
11166 // super must happen after, so that downstream can use maybe_get_output
11167 // to retrieve the output
11168 }
11169 void set_output_raw_strided(
11170 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11171 TensorOptions options, DimnameList names
11172 ) override {
11173 auto current_device = guard_.current_device();
11174 if (C10_UNLIKELY(current_device.has_value())) {
11175 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11176 "structured kernels don't support multi-device outputs");
11177 } else {
11178 guard_.reset_device(options.device());
11179 }
11180 const auto& out = outputs_[output_idx].get();
11181 check_inplace(out, sizes, options);
11182 if (!names.empty()) {
11183 namedinference::propagate_names(outputs_[output_idx], names);
11184 }
11185 // super must happen after, so that downstream can use maybe_get_output
11186 // to retrieve the output
11187 }
11188 const Tensor& maybe_get_output(int64_t output_idx) override {
11189 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11190 }
11191 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11192 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11193 c10::OptionalDeviceGuard guard_;
11194};
11195at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_tril_(at::Tensor & self, int64_t diagonal) {
11196structured_tril_default_backend_inplace op(self);
11197op.meta(self, diagonal);
11198at::tril_outf(self, diagonal, op.outputs_[0]);
11199if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11200return self;
11201}
11202struct structured_triu_default_backend_functional final : public at::meta::structured_triu {
11203 void set_output_strided(
11204 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11205 TensorOptions options, DimnameList names
11206 ) override {
11207 auto current_device = guard_.current_device();
11208 if (C10_UNLIKELY(current_device.has_value())) {
11209 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11210 "structured kernels don't support multi-device outputs");
11211 } else {
11212 guard_.reset_device(options.device());
11213 }
11214 outputs_[output_idx] = create_out(sizes, strides, options);
11215 if (!names.empty()) {
11216 namedinference::propagate_names(*outputs_[output_idx], names);
11217 }
11218 // super must happen after, so that downstream can use maybe_get_output
11219 // to retrieve the output
11220 }
11221 void set_output_raw_strided(
11222 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11223 TensorOptions options, DimnameList names
11224 ) override {
11225 auto current_device = guard_.current_device();
11226 if (C10_UNLIKELY(current_device.has_value())) {
11227 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11228 "structured kernels don't support multi-device outputs");
11229 } else {
11230 guard_.reset_device(options.device());
11231 }
11232 outputs_[output_idx] = create_out(sizes, strides, options);
11233 if (!names.empty()) {
11234 namedinference::propagate_names(*outputs_[output_idx], names);
11235 }
11236 // super must happen after, so that downstream can use maybe_get_output
11237 // to retrieve the output
11238 }
11239 const Tensor& maybe_get_output(int64_t output_idx) override {
11240 return *outputs_[output_idx];
11241 }
11242 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11243 c10::OptionalDeviceGuard guard_;
11244};
11245at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_triu(const at::Tensor & self, int64_t diagonal) {
11246structured_triu_default_backend_functional op;
11247op.meta(self, diagonal);
11248at::triu_outf(self, diagonal, *op.outputs_[0]);
11249return std::move(op.outputs_[0]).take();
11250}
11251struct structured_triu_default_backend_inplace final : public at::meta::structured_triu {
11252 structured_triu_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11253 void set_output_strided(
11254 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11255 TensorOptions options, DimnameList names
11256 ) override {
11257 auto current_device = guard_.current_device();
11258 if (C10_UNLIKELY(current_device.has_value())) {
11259 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11260 "structured kernels don't support multi-device outputs");
11261 } else {
11262 guard_.reset_device(options.device());
11263 }
11264 const auto& out = outputs_[output_idx].get();
11265 check_inplace(out, sizes, options);
11266 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11267 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11268 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11269 }
11270 if (!names.empty()) {
11271 namedinference::propagate_names(outputs_[output_idx], names);
11272 }
11273 // super must happen after, so that downstream can use maybe_get_output
11274 // to retrieve the output
11275 }
11276 void set_output_raw_strided(
11277 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11278 TensorOptions options, DimnameList names
11279 ) override {
11280 auto current_device = guard_.current_device();
11281 if (C10_UNLIKELY(current_device.has_value())) {
11282 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11283 "structured kernels don't support multi-device outputs");
11284 } else {
11285 guard_.reset_device(options.device());
11286 }
11287 const auto& out = outputs_[output_idx].get();
11288 check_inplace(out, sizes, options);
11289 if (!names.empty()) {
11290 namedinference::propagate_names(outputs_[output_idx], names);
11291 }
11292 // super must happen after, so that downstream can use maybe_get_output
11293 // to retrieve the output
11294 }
11295 const Tensor& maybe_get_output(int64_t output_idx) override {
11296 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11297 }
11298 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11299 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11300 c10::OptionalDeviceGuard guard_;
11301};
11302at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_triu_(at::Tensor & self, int64_t diagonal) {
11303structured_triu_default_backend_inplace op(self);
11304op.meta(self, diagonal);
11305at::triu_outf(self, diagonal, op.outputs_[0]);
11306if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11307return self;
11308}
11309struct structured_digamma_default_backend_functional final : public at::meta::structured_digamma {
11310 void set_output_strided(
11311 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11312 TensorOptions options, DimnameList names
11313 ) override {
11314 auto current_device = guard_.current_device();
11315 if (C10_UNLIKELY(current_device.has_value())) {
11316 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11317 "structured kernels don't support multi-device outputs");
11318 } else {
11319 guard_.reset_device(options.device());
11320 }
11321 outputs_[output_idx] = create_out(sizes, strides, options);
11322 if (!names.empty()) {
11323 namedinference::propagate_names(*outputs_[output_idx], names);
11324 }
11325 // super must happen after, so that downstream can use maybe_get_output
11326 // to retrieve the output
11327 at::meta::structured_digamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
11328 }
11329 void set_output_raw_strided(
11330 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11331 TensorOptions options, DimnameList names
11332 ) override {
11333 auto current_device = guard_.current_device();
11334 if (C10_UNLIKELY(current_device.has_value())) {
11335 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11336 "structured kernels don't support multi-device outputs");
11337 } else {
11338 guard_.reset_device(options.device());
11339 }
11340 outputs_[output_idx] = create_out(sizes, strides, options);
11341 if (!names.empty()) {
11342 namedinference::propagate_names(*outputs_[output_idx], names);
11343 }
11344 // super must happen after, so that downstream can use maybe_get_output
11345 // to retrieve the output
11346 at::meta::structured_digamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
11347 }
11348 const Tensor& maybe_get_output(int64_t output_idx) override {
11349 return *outputs_[output_idx];
11350 }
11351 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11352 c10::OptionalDeviceGuard guard_;
11353};
11354at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_digamma(const at::Tensor & self) {
11355structured_digamma_default_backend_functional op;
11356op.meta(self);
11357at::digamma_outf(self, *op.outputs_[0]);
11358return std::move(op.outputs_[0]).take();
11359}
11360struct structured_digamma_default_backend_inplace final : public at::meta::structured_digamma {
11361 structured_digamma_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11362 void set_output_strided(
11363 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11364 TensorOptions options, DimnameList names
11365 ) override {
11366 auto current_device = guard_.current_device();
11367 if (C10_UNLIKELY(current_device.has_value())) {
11368 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11369 "structured kernels don't support multi-device outputs");
11370 } else {
11371 guard_.reset_device(options.device());
11372 }
11373 const auto& out = outputs_[output_idx].get();
11374 check_inplace(out, sizes, options);
11375 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11376 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11377 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11378 }
11379 if (!names.empty()) {
11380 namedinference::propagate_names(outputs_[output_idx], names);
11381 }
11382 // super must happen after, so that downstream can use maybe_get_output
11383 // to retrieve the output
11384 at::meta::structured_digamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
11385 }
11386 void set_output_raw_strided(
11387 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11388 TensorOptions options, DimnameList names
11389 ) override {
11390 auto current_device = guard_.current_device();
11391 if (C10_UNLIKELY(current_device.has_value())) {
11392 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11393 "structured kernels don't support multi-device outputs");
11394 } else {
11395 guard_.reset_device(options.device());
11396 }
11397 const auto& out = outputs_[output_idx].get();
11398 check_inplace(out, sizes, options);
11399 if (!names.empty()) {
11400 namedinference::propagate_names(outputs_[output_idx], names);
11401 }
11402 // super must happen after, so that downstream can use maybe_get_output
11403 // to retrieve the output
11404 at::meta::structured_digamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
11405 }
11406 const Tensor& maybe_get_output(int64_t output_idx) override {
11407 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11408 }
11409 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11410 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11411 c10::OptionalDeviceGuard guard_;
11412};
11413at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_digamma_(at::Tensor & self) {
11414structured_digamma_default_backend_inplace op(self);
11415op.meta(self);
11416at::digamma_outf(self, op.outputs_[0]);
11417if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11418return self;
11419}
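// NOTE: a rough sketch of how a call reaches these wrappers, using digamma as
// the example (the registration that binds the wrappers to this dispatch key is
// not shown in this excerpt, so treat the dispatch step as assumed):
//   at::Tensor y = at::digamma(x);
//     -> dispatcher selects wrapper_CompositeExplicitAutogradNonFunctional_digamma
//     -> op.meta(x) infers sizes/dtype and allocates the output
//     -> at::digamma_outf(x, *op.outputs_[0]) redispatches to a backend out= kernel
//   x.digamma_();
//     -> wrapper_CompositeExplicitAutogradNonFunctional_digamma_ wraps x itself,
//        then at::digamma_outf(x, op.outputs_[0]) writes into x (or into a proxy
//        that is copied back afterwards).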
11420struct structured_lerp_Scalar_default_backend_functional final : public at::meta::structured_lerp_Scalar {
11421 void set_output_strided(
11422 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11423 TensorOptions options, DimnameList names
11424 ) override {
11425 auto current_device = guard_.current_device();
11426 if (C10_UNLIKELY(current_device.has_value())) {
11427 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11428 "structured kernels don't support multi-device outputs");
11429 } else {
11430 guard_.reset_device(options.device());
11431 }
11432 outputs_[output_idx] = create_out(sizes, strides, options);
11433 if (!names.empty()) {
11434 namedinference::propagate_names(*outputs_[output_idx], names);
11435 }
11436 // super must happen after, so that downstream can use maybe_get_output
11437 // to retrieve the output
11438 at::meta::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11439 }
11440 void set_output_raw_strided(
11441 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11442 TensorOptions options, DimnameList names
11443 ) override {
11444 auto current_device = guard_.current_device();
11445 if (C10_UNLIKELY(current_device.has_value())) {
11446 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11447 "structured kernels don't support multi-device outputs");
11448 } else {
11449 guard_.reset_device(options.device());
11450 }
11451 outputs_[output_idx] = create_out(sizes, strides, options);
11452 if (!names.empty()) {
11453 namedinference::propagate_names(*outputs_[output_idx], names);
11454 }
11455 // super must happen after, so that downstream can use maybe_get_output
11456 // to retrieve the output
11457 at::meta::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11458 }
11459 const Tensor& maybe_get_output(int64_t output_idx) override {
11460 return *outputs_[output_idx];
11461 }
11462 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11463 c10::OptionalDeviceGuard guard_;
11464};
11465at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_lerp_Scalar(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
11466structured_lerp_Scalar_default_backend_functional op;
11467op.meta(self, end, weight);
11468at::lerp_outf(self, end, weight, *op.outputs_[0]);
11469return std::move(op.outputs_[0]).take();
11470}
11471struct structured_lerp_Scalar_default_backend_inplace final : public at::meta::structured_lerp_Scalar {
11472 structured_lerp_Scalar_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11473 void set_output_strided(
11474 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11475 TensorOptions options, DimnameList names
11476 ) override {
11477 auto current_device = guard_.current_device();
11478 if (C10_UNLIKELY(current_device.has_value())) {
11479 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11480 "structured kernels don't support multi-device outputs");
11481 } else {
11482 guard_.reset_device(options.device());
11483 }
11484 const auto& out = outputs_[output_idx].get();
11485 check_inplace(out, sizes, options);
11486 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11487 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11488 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11489 }
11490 if (!names.empty()) {
11491 namedinference::propagate_names(outputs_[output_idx], names);
11492 }
11493 // super must happen after, so that downstream can use maybe_get_output
11494 // to retrieve the output
11495 at::meta::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11496 }
11497 void set_output_raw_strided(
11498 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11499 TensorOptions options, DimnameList names
11500 ) override {
11501 auto current_device = guard_.current_device();
11502 if (C10_UNLIKELY(current_device.has_value())) {
11503 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11504 "structured kernels don't support multi-device outputs");
11505 } else {
11506 guard_.reset_device(options.device());
11507 }
11508 const auto& out = outputs_[output_idx].get();
11509 check_inplace(out, sizes, options);
11510 if (!names.empty()) {
11511 namedinference::propagate_names(outputs_[output_idx], names);
11512 }
11513 // super must happen after, so that downstream can use maybe_get_output
11514 // to retrieve the output
11515 at::meta::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11516 }
11517 const Tensor& maybe_get_output(int64_t output_idx) override {
11518 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11519 }
11520 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11521 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11522 c10::OptionalDeviceGuard guard_;
11523};
11524at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_lerp__Scalar(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
11525structured_lerp_Scalar_default_backend_inplace op(self);
11526op.meta(self, end, weight);
11527at::lerp_outf(self, end, weight, op.outputs_[0]);
11528if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11529return self;
11530}
11531struct structured_lerp_Tensor_default_backend_functional final : public at::meta::structured_lerp_Tensor {
11532 void set_output_strided(
11533 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11534 TensorOptions options, DimnameList names
11535 ) override {
11536 auto current_device = guard_.current_device();
11537 if (C10_UNLIKELY(current_device.has_value())) {
11538 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11539 "structured kernels don't support multi-device outputs");
11540 } else {
11541 guard_.reset_device(options.device());
11542 }
11543 outputs_[output_idx] = create_out(sizes, strides, options);
11544 if (!names.empty()) {
11545 namedinference::propagate_names(*outputs_[output_idx], names);
11546 }
11547 // super must happen after, so that downstream can use maybe_get_output
11548 // to retrieve the output
11549 at::meta::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11550 }
11551 void set_output_raw_strided(
11552 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11553 TensorOptions options, DimnameList names
11554 ) override {
11555 auto current_device = guard_.current_device();
11556 if (C10_UNLIKELY(current_device.has_value())) {
11557 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11558 "structured kernels don't support multi-device outputs");
11559 } else {
11560 guard_.reset_device(options.device());
11561 }
11562 outputs_[output_idx] = create_out(sizes, strides, options);
11563 if (!names.empty()) {
11564 namedinference::propagate_names(*outputs_[output_idx], names);
11565 }
11566 // super must happen after, so that downstream can use maybe_get_output
11567 // to retrieve the output
11568 at::meta::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11569 }
11570 const Tensor& maybe_get_output(int64_t output_idx) override {
11571 return *outputs_[output_idx];
11572 }
11573 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11574 c10::OptionalDeviceGuard guard_;
11575};
11576at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_lerp_Tensor(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
11577structured_lerp_Tensor_default_backend_functional op;
11578op.meta(self, end, weight);
11579at::lerp_outf(self, end, weight, *op.outputs_[0]);
11580return std::move(op.outputs_[0]).take();
11581}
11582struct structured_lerp_Tensor_default_backend_inplace final : public at::meta::structured_lerp_Tensor {
11583 structured_lerp_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11584 void set_output_strided(
11585 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11586 TensorOptions options, DimnameList names
11587 ) override {
11588 auto current_device = guard_.current_device();
11589 if (C10_UNLIKELY(current_device.has_value())) {
11590 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11591 "structured kernels don't support multi-device outputs");
11592 } else {
11593 guard_.reset_device(options.device());
11594 }
11595 const auto& out = outputs_[output_idx].get();
11596 check_inplace(out, sizes, options);
11597 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11598 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11599 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11600 }
11601 if (!names.empty()) {
11602 namedinference::propagate_names(outputs_[output_idx], names);
11603 }
11604 // super must happen after, so that downstream can use maybe_get_output
11605 // to retrieve the output
11606 at::meta::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11607 }
11608 void set_output_raw_strided(
11609 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11610 TensorOptions options, DimnameList names
11611 ) override {
11612 auto current_device = guard_.current_device();
11613 if (C10_UNLIKELY(current_device.has_value())) {
11614 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11615 "structured kernels don't support multi-device outputs");
11616 } else {
11617 guard_.reset_device(options.device());
11618 }
11619 const auto& out = outputs_[output_idx].get();
11620 check_inplace(out, sizes, options);
11621 if (!names.empty()) {
11622 namedinference::propagate_names(outputs_[output_idx], names);
11623 }
11624 // super must happen after, so that downstream can use maybe_get_output
11625 // to retrieve the output
11626 at::meta::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11627 }
11628 const Tensor& maybe_get_output(int64_t output_idx) override {
11629 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11630 }
11631 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11632 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11633 c10::OptionalDeviceGuard guard_;
11634};
11635at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_lerp__Tensor(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
11636structured_lerp_Tensor_default_backend_inplace op(self);
11637op.meta(self, end, weight);
11638at::lerp_outf(self, end, weight, op.outputs_[0]);
11639if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11640return self;
11641}
11642struct structured_ne_Scalar_default_backend_functional final : public at::meta::structured_ne_Scalar {
11643 void set_output_strided(
11644 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11645 TensorOptions options, DimnameList names
11646 ) override {
11647 auto current_device = guard_.current_device();
11648 if (C10_UNLIKELY(current_device.has_value())) {
11649 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11650 "structured kernels don't support multi-device outputs");
11651 } else {
11652 guard_.reset_device(options.device());
11653 }
11654 outputs_[output_idx] = create_out(sizes, strides, options);
11655 if (!names.empty()) {
11656 namedinference::propagate_names(*outputs_[output_idx], names);
11657 }
11658 // super must happen after, so that downstream can use maybe_get_output
11659 // to retrieve the output
11660 at::meta::structured_ne_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11661 }
11662 void set_output_raw_strided(
11663 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11664 TensorOptions options, DimnameList names
11665 ) override {
11666 auto current_device = guard_.current_device();
11667 if (C10_UNLIKELY(current_device.has_value())) {
11668 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11669 "structured kernels don't support multi-device outputs");
11670 } else {
11671 guard_.reset_device(options.device());
11672 }
11673 outputs_[output_idx] = create_out(sizes, strides, options);
11674 if (!names.empty()) {
11675 namedinference::propagate_names(*outputs_[output_idx], names);
11676 }
11677 // super must happen after, so that downstream can use maybe_get_output
11678 // to retrieve the output
11679 at::meta::structured_ne_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11680 }
11681 const Tensor& maybe_get_output(int64_t output_idx) override {
11682 return *outputs_[output_idx];
11683 }
11684 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11685 c10::OptionalDeviceGuard guard_;
11686};
11687at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_ne_Scalar(const at::Tensor & self, const at::Scalar & other) {
11688structured_ne_Scalar_default_backend_functional op;
11689op.meta(self, other);
11690at::ne_outf(self, other, *op.outputs_[0]);
11691return std::move(op.outputs_[0]).take();
11692}
11693struct structured_ne_Scalar_default_backend_inplace final : public at::meta::structured_ne_Scalar {
11694 structured_ne_Scalar_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11695 void set_output_strided(
11696 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11697 TensorOptions options, DimnameList names
11698 ) override {
11699 auto current_device = guard_.current_device();
11700 if (C10_UNLIKELY(current_device.has_value())) {
11701 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11702 "structured kernels don't support multi-device outputs");
11703 } else {
11704 guard_.reset_device(options.device());
11705 }
11706 const auto& out = outputs_[output_idx].get();
11707 check_inplace(out, sizes, options);
11708 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11709 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11710 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11711 }
11712 if (!names.empty()) {
11713 namedinference::propagate_names(outputs_[output_idx], names);
11714 }
11715 // super must happen after, so that downstream can use maybe_get_output
11716 // to retrieve the output
11717 at::meta::structured_ne_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11718 }
11719 void set_output_raw_strided(
11720 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11721 TensorOptions options, DimnameList names
11722 ) override {
11723 auto current_device = guard_.current_device();
11724 if (C10_UNLIKELY(current_device.has_value())) {
11725 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11726 "structured kernels don't support multi-device outputs");
11727 } else {
11728 guard_.reset_device(options.device());
11729 }
11730 const auto& out = outputs_[output_idx].get();
11731 check_inplace(out, sizes, options);
11732 if (!names.empty()) {
11733 namedinference::propagate_names(outputs_[output_idx], names);
11734 }
11735 // super must happen after, so that downstream can use maybe_get_output
11736 // to retrieve the output
11737 at::meta::structured_ne_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11738 }
11739 const Tensor& maybe_get_output(int64_t output_idx) override {
11740 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11741 }
11742 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11743 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11744 c10::OptionalDeviceGuard guard_;
11745};
11746at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_ne__Scalar(at::Tensor & self, const at::Scalar & other) {
11747structured_ne_Scalar_default_backend_inplace op(self);
11748op.meta(self, other);
11749at::ne_outf(self, other, op.outputs_[0]);
11750if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11751return self;
11752}
11753struct structured_ne_Tensor_default_backend_functional final : public at::meta::structured_ne_Tensor {
11754 void set_output_strided(
11755 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11756 TensorOptions options, DimnameList names
11757 ) override {
11758 auto current_device = guard_.current_device();
11759 if (C10_UNLIKELY(current_device.has_value())) {
11760 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11761 "structured kernels don't support multi-device outputs");
11762 } else {
11763 guard_.reset_device(options.device());
11764 }
11765 outputs_[output_idx] = create_out(sizes, strides, options);
11766 if (!names.empty()) {
11767 namedinference::propagate_names(*outputs_[output_idx], names);
11768 }
11769 // super must happen after, so that downstream can use maybe_get_output
11770 // to retrieve the output
11771 at::meta::structured_ne_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11772 }
11773 void set_output_raw_strided(
11774 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11775 TensorOptions options, DimnameList names
11776 ) override {
11777 auto current_device = guard_.current_device();
11778 if (C10_UNLIKELY(current_device.has_value())) {
11779 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11780 "structured kernels don't support multi-device outputs");
11781 } else {
11782 guard_.reset_device(options.device());
11783 }
11784 outputs_[output_idx] = create_out(sizes, strides, options);
11785 if (!names.empty()) {
11786 namedinference::propagate_names(*outputs_[output_idx], names);
11787 }
11788 // super must happen after, so that downstream can use maybe_get_output
11789 // to retrieve the output
11790 at::meta::structured_ne_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11791 }
11792 const Tensor& maybe_get_output(int64_t output_idx) override {
11793 return *outputs_[output_idx];
11794 }
11795 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11796 c10::OptionalDeviceGuard guard_;
11797};
11798at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_ne_Tensor(const at::Tensor & self, const at::Tensor & other) {
11799structured_ne_Tensor_default_backend_functional op;
11800op.meta(self, other);
11801at::ne_outf(self, other, *op.outputs_[0]);
11802return std::move(op.outputs_[0]).take();
11803}
11804struct structured_ne_Tensor_default_backend_inplace final : public at::meta::structured_ne_Tensor {
11805 structured_ne_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11806 void set_output_strided(
11807 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11808 TensorOptions options, DimnameList names
11809 ) override {
11810 auto current_device = guard_.current_device();
11811 if (C10_UNLIKELY(current_device.has_value())) {
11812 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11813 "structured kernels don't support multi-device outputs");
11814 } else {
11815 guard_.reset_device(options.device());
11816 }
11817 const auto& out = outputs_[output_idx].get();
11818 check_inplace(out, sizes, options);
11819 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11820 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11821 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11822 }
11823 if (!names.empty()) {
11824 namedinference::propagate_names(outputs_[output_idx], names);
11825 }
11826 // super must happen after, so that downstream can use maybe_get_output
11827 // to retrieve the output
11828 at::meta::structured_ne_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11829 }
11830 void set_output_raw_strided(
11831 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11832 TensorOptions options, DimnameList names
11833 ) override {
11834 auto current_device = guard_.current_device();
11835 if (C10_UNLIKELY(current_device.has_value())) {
11836 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11837 "structured kernels don't support multi-device outputs");
11838 } else {
11839 guard_.reset_device(options.device());
11840 }
11841 const auto& out = outputs_[output_idx].get();
11842 check_inplace(out, sizes, options);
11843 if (!names.empty()) {
11844 namedinference::propagate_names(outputs_[output_idx], names);
11845 }
11846 // super must happen after, so that downstream can use maybe_get_output
11847 // to retrieve the output
11848 at::meta::structured_ne_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11849 }
11850 const Tensor& maybe_get_output(int64_t output_idx) override {
11851 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11852 }
11853 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11854 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11855 c10::OptionalDeviceGuard guard_;
11856};
11857at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_ne__Tensor(at::Tensor & self, const at::Tensor & other) {
11858structured_ne_Tensor_default_backend_inplace op(self);
11859op.meta(self, other);
11860at::ne_outf(self, other, op.outputs_[0]);
11861if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11862return self;
11863}
11864struct structured_ge_Scalar_default_backend_functional final : public at::meta::structured_ge_Scalar {
11865 void set_output_strided(
11866 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11867 TensorOptions options, DimnameList names
11868 ) override {
11869 auto current_device = guard_.current_device();
11870 if (C10_UNLIKELY(current_device.has_value())) {
11871 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11872 "structured kernels don't support multi-device outputs");
11873 } else {
11874 guard_.reset_device(options.device());
11875 }
11876 outputs_[output_idx] = create_out(sizes, strides, options);
11877 if (!names.empty()) {
11878 namedinference::propagate_names(*outputs_[output_idx], names);
11879 }
11880 // super must happen after, so that downstream can use maybe_get_output
11881 // to retrieve the output
11882 at::meta::structured_ge_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11883 }
11884 void set_output_raw_strided(
11885 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11886 TensorOptions options, DimnameList names
11887 ) override {
11888 auto current_device = guard_.current_device();
11889 if (C10_UNLIKELY(current_device.has_value())) {
11890 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11891 "structured kernels don't support multi-device outputs");
11892 } else {
11893 guard_.reset_device(options.device());
11894 }
11895 outputs_[output_idx] = create_out(sizes, strides, options);
11896 if (!names.empty()) {
11897 namedinference::propagate_names(*outputs_[output_idx], names);
11898 }
11899 // super must happen after, so that downstream can use maybe_get_output
11900 // to retrieve the output
11901 at::meta::structured_ge_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11902 }
11903 const Tensor& maybe_get_output(int64_t output_idx) override {
11904 return *outputs_[output_idx];
11905 }
11906 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11907 c10::OptionalDeviceGuard guard_;
11908};
11909at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_ge_Scalar(const at::Tensor & self, const at::Scalar & other) {
11910structured_ge_Scalar_default_backend_functional op;
11911op.meta(self, other);
11912at::ge_outf(self, other, *op.outputs_[0]);
11913return std::move(op.outputs_[0]).take();
11914}
11915struct structured_ge_Scalar_default_backend_inplace final : public at::meta::structured_ge_Scalar {
11916 structured_ge_Scalar_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
11917 void set_output_strided(
11918 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11919 TensorOptions options, DimnameList names
11920 ) override {
11921 auto current_device = guard_.current_device();
11922 if (C10_UNLIKELY(current_device.has_value())) {
11923 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11924 "structured kernels don't support multi-device outputs");
11925 } else {
11926 guard_.reset_device(options.device());
11927 }
11928 const auto& out = outputs_[output_idx].get();
11929 check_inplace(out, sizes, options);
11930 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
11931 if (C10_UNLIKELY(maybe_proxy.has_value())) {
11932 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
11933 }
11934 if (!names.empty()) {
11935 namedinference::propagate_names(outputs_[output_idx], names);
11936 }
11937 // super must happen after, so that downstream can use maybe_get_output
11938 // to retrieve the output
11939 at::meta::structured_ge_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11940 }
11941 void set_output_raw_strided(
11942 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11943 TensorOptions options, DimnameList names
11944 ) override {
11945 auto current_device = guard_.current_device();
11946 if (C10_UNLIKELY(current_device.has_value())) {
11947 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11948 "structured kernels don't support multi-device outputs");
11949 } else {
11950 guard_.reset_device(options.device());
11951 }
11952 const auto& out = outputs_[output_idx].get();
11953 check_inplace(out, sizes, options);
11954 if (!names.empty()) {
11955 namedinference::propagate_names(outputs_[output_idx], names);
11956 }
11957 // super must happen after, so that downstream can use maybe_get_output
11958 // to retrieve the output
11959 at::meta::structured_ge_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
11960 }
11961 const Tensor& maybe_get_output(int64_t output_idx) override {
11962 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
11963 }
11964 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
11965 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
11966 c10::OptionalDeviceGuard guard_;
11967};
11968at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_ge__Scalar(at::Tensor & self, const at::Scalar & other) {
11969structured_ge_Scalar_default_backend_inplace op(self);
11970op.meta(self, other);
11971at::ge_outf(self, other, op.outputs_[0]);
11972if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
11973return self;
11974}
11975struct structured_ge_Tensor_default_backend_functional final : public at::meta::structured_ge_Tensor {
11976 void set_output_strided(
11977 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11978 TensorOptions options, DimnameList names
11979 ) override {
11980 auto current_device = guard_.current_device();
11981 if (C10_UNLIKELY(current_device.has_value())) {
11982 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
11983 "structured kernels don't support multi-device outputs");
11984 } else {
11985 guard_.reset_device(options.device());
11986 }
11987 outputs_[output_idx] = create_out(sizes, strides, options);
11988 if (!names.empty()) {
11989 namedinference::propagate_names(*outputs_[output_idx], names);
11990 }
11991 // super must happen after, so that downstream can use maybe_get_output
11992 // to retrieve the output
11993 at::meta::structured_ge_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
11994 }
11995 void set_output_raw_strided(
11996 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11997 TensorOptions options, DimnameList names
11998 ) override {
11999 auto current_device = guard_.current_device();
12000 if (C10_UNLIKELY(current_device.has_value())) {
12001 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12002 "structured kernels don't support multi-device outputs");
12003 } else {
12004 guard_.reset_device(options.device());
12005 }
12006 outputs_[output_idx] = create_out(sizes, strides, options);
12007 if (!names.empty()) {
12008 namedinference::propagate_names(*outputs_[output_idx], names);
12009 }
12010 // super must happen after, so that downstream can use maybe_get_output
12011 // to retrieve the output
12012 at::meta::structured_ge_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12013 }
12014 const Tensor& maybe_get_output(int64_t output_idx) override {
12015 return *outputs_[output_idx];
12016 }
12017 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12018 c10::OptionalDeviceGuard guard_;
12019};
12020at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_ge_Tensor(const at::Tensor & self, const at::Tensor & other) {
12021structured_ge_Tensor_default_backend_functional op;
12022op.meta(self, other);
12023at::ge_outf(self, other, *op.outputs_[0]);
12024return std::move(op.outputs_[0]).take();
12025}
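// NOTE [generated inplace wrappers]: the "*_inplace" structs reference `self`
// through std::reference_wrapper instead of allocating. check_inplace()
// verifies that `self` can receive the result; if maybe_create_proxy() decides
// `self` cannot be written with the requested strides, the kernel writes into
// the proxy tensor instead (maybe_get_output() returns it), and the wrapper_*
// function copies the proxy back into `self` before returning it.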
12026struct structured_ge_Tensor_default_backend_inplace final : public at::meta::structured_ge_Tensor {
12027 structured_ge_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12028 void set_output_strided(
12029 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12030 TensorOptions options, DimnameList names
12031 ) override {
12032 auto current_device = guard_.current_device();
12033 if (C10_UNLIKELY(current_device.has_value())) {
12034 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12035 "structured kernels don't support multi-device outputs");
12036 } else {
12037 guard_.reset_device(options.device());
12038 }
12039 const auto& out = outputs_[output_idx].get();
12040 check_inplace(out, sizes, options);
12041 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12042 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12043 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12044 }
12045 if (!names.empty()) {
12046 namedinference::propagate_names(outputs_[output_idx], names);
12047 }
12048 // super must happen after, so that downstream can use maybe_get_output
12049 // to retrieve the output
12050 at::meta::structured_ge_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12051 }
12052 void set_output_raw_strided(
12053 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12054 TensorOptions options, DimnameList names
12055 ) override {
12056 auto current_device = guard_.current_device();
12057 if (C10_UNLIKELY(current_device.has_value())) {
12058 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12059 "structured kernels don't support multi-device outputs");
12060 } else {
12061 guard_.reset_device(options.device());
12062 }
12063 const auto& out = outputs_[output_idx].get();
12064 check_inplace(out, sizes, options);
12065 if (!names.empty()) {
12066 namedinference::propagate_names(outputs_[output_idx], names);
12067 }
12068 // super must happen after, so that downstream can use maybe_get_output
12069 // to retrieve the output
12070 at::meta::structured_ge_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12071 }
12072 const Tensor& maybe_get_output(int64_t output_idx) override {
12073 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12074 }
12075 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12076 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12077 c10::OptionalDeviceGuard guard_;
12078};
12079at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_ge__Tensor(at::Tensor & self, const at::Tensor & other) {
12080structured_ge_Tensor_default_backend_inplace op(self);
12081op.meta(self, other);
12082at::ge_outf(self, other, op.outputs_[0]);
12083if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12084return self;
12085}
12086struct structured_le_Scalar_default_backend_functional final : public at::meta::structured_le_Scalar {
12087 void set_output_strided(
12088 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12089 TensorOptions options, DimnameList names
12090 ) override {
12091 auto current_device = guard_.current_device();
12092 if (C10_UNLIKELY(current_device.has_value())) {
12093 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12094 "structured kernels don't support multi-device outputs");
12095 } else {
12096 guard_.reset_device(options.device());
12097 }
12098 outputs_[output_idx] = create_out(sizes, strides, options);
12099 if (!names.empty()) {
12100 namedinference::propagate_names(*outputs_[output_idx], names);
12101 }
12102 // super must happen after, so that downstream can use maybe_get_output
12103 // to retrieve the output
12104 at::meta::structured_le_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12105 }
12106 void set_output_raw_strided(
12107 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12108 TensorOptions options, DimnameList names
12109 ) override {
12110 auto current_device = guard_.current_device();
12111 if (C10_UNLIKELY(current_device.has_value())) {
12112 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12113 "structured kernels don't support multi-device outputs");
12114 } else {
12115 guard_.reset_device(options.device());
12116 }
12117 outputs_[output_idx] = create_out(sizes, strides, options);
12118 if (!names.empty()) {
12119 namedinference::propagate_names(*outputs_[output_idx], names);
12120 }
12121 // super must happen after, so that downstream can use maybe_get_output
12122 // to retrieve the output
12123 at::meta::structured_le_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12124 }
12125 const Tensor& maybe_get_output(int64_t output_idx) override {
12126 return *outputs_[output_idx];
12127 }
12128 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12129 c10::OptionalDeviceGuard guard_;
12130};
12131at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_le_Scalar(const at::Tensor & self, const at::Scalar & other) {
12132structured_le_Scalar_default_backend_functional op;
12133op.meta(self, other);
12134at::le_outf(self, other, *op.outputs_[0]);
12135return std::move(op.outputs_[0]).take();
12136}
12137struct structured_le_Scalar_default_backend_inplace final : public at::meta::structured_le_Scalar {
12138 structured_le_Scalar_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12139 void set_output_strided(
12140 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12141 TensorOptions options, DimnameList names
12142 ) override {
12143 auto current_device = guard_.current_device();
12144 if (C10_UNLIKELY(current_device.has_value())) {
12145 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12146 "structured kernels don't support multi-device outputs");
12147 } else {
12148 guard_.reset_device(options.device());
12149 }
12150 const auto& out = outputs_[output_idx].get();
12151 check_inplace(out, sizes, options);
12152 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12153 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12154 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12155 }
12156 if (!names.empty()) {
12157 namedinference::propagate_names(outputs_[output_idx], names);
12158 }
12159 // super must happen after, so that downstream can use maybe_get_output
12160 // to retrieve the output
12161 at::meta::structured_le_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12162 }
12163 void set_output_raw_strided(
12164 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12165 TensorOptions options, DimnameList names
12166 ) override {
12167 auto current_device = guard_.current_device();
12168 if (C10_UNLIKELY(current_device.has_value())) {
12169 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12170 "structured kernels don't support multi-device outputs");
12171 } else {
12172 guard_.reset_device(options.device());
12173 }
12174 const auto& out = outputs_[output_idx].get();
12175 check_inplace(out, sizes, options);
12176 if (!names.empty()) {
12177 namedinference::propagate_names(outputs_[output_idx], names);
12178 }
12179 // super must happen after, so that downstream can use maybe_get_output
12180 // to retrieve the output
12181 at::meta::structured_le_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12182 }
12183 const Tensor& maybe_get_output(int64_t output_idx) override {
12184 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12185 }
12186 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12187 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12188 c10::OptionalDeviceGuard guard_;
12189};
12190at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_le__Scalar(at::Tensor & self, const at::Scalar & other) {
12191structured_le_Scalar_default_backend_inplace op(self);
12192op.meta(self, other);
12193at::le_outf(self, other, op.outputs_[0]);
12194if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12195return self;
12196}
12197struct structured_le_Tensor_default_backend_functional final : public at::meta::structured_le_Tensor {
12198 void set_output_strided(
12199 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12200 TensorOptions options, DimnameList names
12201 ) override {
12202 auto current_device = guard_.current_device();
12203 if (C10_UNLIKELY(current_device.has_value())) {
12204 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12205 "structured kernels don't support multi-device outputs");
12206 } else {
12207 guard_.reset_device(options.device());
12208 }
12209 outputs_[output_idx] = create_out(sizes, strides, options);
12210 if (!names.empty()) {
12211 namedinference::propagate_names(*outputs_[output_idx], names);
12212 }
12213 // super must happen after, so that downstream can use maybe_get_output
12214 // to retrieve the output
12215 at::meta::structured_le_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12216 }
12217 void set_output_raw_strided(
12218 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12219 TensorOptions options, DimnameList names
12220 ) override {
12221 auto current_device = guard_.current_device();
12222 if (C10_UNLIKELY(current_device.has_value())) {
12223 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12224 "structured kernels don't support multi-device outputs");
12225 } else {
12226 guard_.reset_device(options.device());
12227 }
12228 outputs_[output_idx] = create_out(sizes, strides, options);
12229 if (!names.empty()) {
12230 namedinference::propagate_names(*outputs_[output_idx], names);
12231 }
12232 // super must happen after, so that downstream can use maybe_get_output
12233 // to retrieve the output
12234 at::meta::structured_le_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12235 }
12236 const Tensor& maybe_get_output(int64_t output_idx) override {
12237 return *outputs_[output_idx];
12238 }
12239 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12240 c10::OptionalDeviceGuard guard_;
12241};
12242at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_le_Tensor(const at::Tensor & self, const at::Tensor & other) {
12243structured_le_Tensor_default_backend_functional op;
12244op.meta(self, other);
12245at::le_outf(self, other, *op.outputs_[0]);
12246return std::move(op.outputs_[0]).take();
12247}
12248struct structured_le_Tensor_default_backend_inplace final : public at::meta::structured_le_Tensor {
12249 structured_le_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12250 void set_output_strided(
12251 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12252 TensorOptions options, DimnameList names
12253 ) override {
12254 auto current_device = guard_.current_device();
12255 if (C10_UNLIKELY(current_device.has_value())) {
12256 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12257 "structured kernels don't support multi-device outputs");
12258 } else {
12259 guard_.reset_device(options.device());
12260 }
12261 const auto& out = outputs_[output_idx].get();
12262 check_inplace(out, sizes, options);
12263 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12264 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12265 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12266 }
12267 if (!names.empty()) {
12268 namedinference::propagate_names(outputs_[output_idx], names);
12269 }
12270 // super must happen after, so that downstream can use maybe_get_output
12271 // to retrieve the output
12272 at::meta::structured_le_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12273 }
12274 void set_output_raw_strided(
12275 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12276 TensorOptions options, DimnameList names
12277 ) override {
12278 auto current_device = guard_.current_device();
12279 if (C10_UNLIKELY(current_device.has_value())) {
12280 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12281 "structured kernels don't support multi-device outputs");
12282 } else {
12283 guard_.reset_device(options.device());
12284 }
12285 const auto& out = outputs_[output_idx].get();
12286 check_inplace(out, sizes, options);
12287 if (!names.empty()) {
12288 namedinference::propagate_names(outputs_[output_idx], names);
12289 }
12290 // super must happen after, so that downstream can use maybe_get_output
12291 // to retrieve the output
12292 at::meta::structured_le_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12293 }
12294 const Tensor& maybe_get_output(int64_t output_idx) override {
12295 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12296 }
12297 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12298 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12299 c10::OptionalDeviceGuard guard_;
12300};
12301at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_le__Tensor(at::Tensor & self, const at::Tensor & other) {
12302structured_le_Tensor_default_backend_inplace op(self);
12303op.meta(self, other);
12304at::le_outf(self, other, op.outputs_[0]);
12305if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12306return self;
12307}
12308struct structured_gt_Scalar_default_backend_functional final : public at::meta::structured_gt_Scalar {
12309 void set_output_strided(
12310 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12311 TensorOptions options, DimnameList names
12312 ) override {
12313 auto current_device = guard_.current_device();
12314 if (C10_UNLIKELY(current_device.has_value())) {
12315 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12316 "structured kernels don't support multi-device outputs");
12317 } else {
12318 guard_.reset_device(options.device());
12319 }
12320 outputs_[output_idx] = create_out(sizes, strides, options);
12321 if (!names.empty()) {
12322 namedinference::propagate_names(*outputs_[output_idx], names);
12323 }
12324 // super must happen after, so that downstream can use maybe_get_output
12325 // to retrieve the output
12326 at::meta::structured_gt_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12327 }
12328 void set_output_raw_strided(
12329 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12330 TensorOptions options, DimnameList names
12331 ) override {
12332 auto current_device = guard_.current_device();
12333 if (C10_UNLIKELY(current_device.has_value())) {
12334 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12335 "structured kernels don't support multi-device outputs");
12336 } else {
12337 guard_.reset_device(options.device());
12338 }
12339 outputs_[output_idx] = create_out(sizes, strides, options);
12340 if (!names.empty()) {
12341 namedinference::propagate_names(*outputs_[output_idx], names);
12342 }
12343 // super must happen after, so that downstream can use maybe_get_output
12344 // to retrieve the output
12345 at::meta::structured_gt_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12346 }
12347 const Tensor& maybe_get_output(int64_t output_idx) override {
12348 return *outputs_[output_idx];
12349 }
12350 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12351 c10::OptionalDeviceGuard guard_;
12352};
12353at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_gt_Scalar(const at::Tensor & self, const at::Scalar & other) {
12354structured_gt_Scalar_default_backend_functional op;
12355op.meta(self, other);
12356at::gt_outf(self, other, *op.outputs_[0]);
12357return std::move(op.outputs_[0]).take();
12358}
12359struct structured_gt_Scalar_default_backend_inplace final : public at::meta::structured_gt_Scalar {
12360 structured_gt_Scalar_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12361 void set_output_strided(
12362 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12363 TensorOptions options, DimnameList names
12364 ) override {
12365 auto current_device = guard_.current_device();
12366 if (C10_UNLIKELY(current_device.has_value())) {
12367 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12368 "structured kernels don't support multi-device outputs");
12369 } else {
12370 guard_.reset_device(options.device());
12371 }
12372 const auto& out = outputs_[output_idx].get();
12373 check_inplace(out, sizes, options);
12374 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12375 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12376 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12377 }
12378 if (!names.empty()) {
12379 namedinference::propagate_names(outputs_[output_idx], names);
12380 }
12381 // super must happen after, so that downstream can use maybe_get_output
12382 // to retrieve the output
12383 at::meta::structured_gt_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12384 }
12385 void set_output_raw_strided(
12386 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12387 TensorOptions options, DimnameList names
12388 ) override {
12389 auto current_device = guard_.current_device();
12390 if (C10_UNLIKELY(current_device.has_value())) {
12391 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12392 "structured kernels don't support multi-device outputs");
12393 } else {
12394 guard_.reset_device(options.device());
12395 }
12396 const auto& out = outputs_[output_idx].get();
12397 check_inplace(out, sizes, options);
12398 if (!names.empty()) {
12399 namedinference::propagate_names(outputs_[output_idx], names);
12400 }
12401 // super must happen after, so that downstream can use maybe_get_output
12402 // to retrieve the output
12403 at::meta::structured_gt_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12404 }
12405 const Tensor& maybe_get_output(int64_t output_idx) override {
12406 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12407 }
12408 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12409 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12410 c10::OptionalDeviceGuard guard_;
12411};
12412at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_gt__Scalar(at::Tensor & self, const at::Scalar & other) {
12413structured_gt_Scalar_default_backend_inplace op(self);
12414op.meta(self, other);
12415at::gt_outf(self, other, op.outputs_[0]);
12416if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12417return self;
12418}
12419struct structured_gt_Tensor_default_backend_functional final : public at::meta::structured_gt_Tensor {
12420 void set_output_strided(
12421 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12422 TensorOptions options, DimnameList names
12423 ) override {
12424 auto current_device = guard_.current_device();
12425 if (C10_UNLIKELY(current_device.has_value())) {
12426 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12427 "structured kernels don't support multi-device outputs");
12428 } else {
12429 guard_.reset_device(options.device());
12430 }
12431 outputs_[output_idx] = create_out(sizes, strides, options);
12432 if (!names.empty()) {
12433 namedinference::propagate_names(*outputs_[output_idx], names);
12434 }
12435 // super must happen after, so that downstream can use maybe_get_output
12436 // to retrieve the output
12437 at::meta::structured_gt_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12438 }
12439 void set_output_raw_strided(
12440 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12441 TensorOptions options, DimnameList names
12442 ) override {
12443 auto current_device = guard_.current_device();
12444 if (C10_UNLIKELY(current_device.has_value())) {
12445 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12446 "structured kernels don't support multi-device outputs");
12447 } else {
12448 guard_.reset_device(options.device());
12449 }
12450 outputs_[output_idx] = create_out(sizes, strides, options);
12451 if (!names.empty()) {
12452 namedinference::propagate_names(*outputs_[output_idx], names);
12453 }
12454 // super must happen after, so that downstream can use maybe_get_output
12455 // to retrieve the output
12456 at::meta::structured_gt_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12457 }
12458 const Tensor& maybe_get_output(int64_t output_idx) override {
12459 return *outputs_[output_idx];
12460 }
12461 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12462 c10::OptionalDeviceGuard guard_;
12463};
12464at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_gt_Tensor(const at::Tensor & self, const at::Tensor & other) {
12465structured_gt_Tensor_default_backend_functional op;
12466op.meta(self, other);
12467at::gt_outf(self, other, *op.outputs_[0]);
12468return std::move(op.outputs_[0]).take();
12469}
12470struct structured_gt_Tensor_default_backend_inplace final : public at::meta::structured_gt_Tensor {
12471 structured_gt_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12472 void set_output_strided(
12473 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12474 TensorOptions options, DimnameList names
12475 ) override {
12476 auto current_device = guard_.current_device();
12477 if (C10_UNLIKELY(current_device.has_value())) {
12478 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12479 "structured kernels don't support multi-device outputs");
12480 } else {
12481 guard_.reset_device(options.device());
12482 }
12483 const auto& out = outputs_[output_idx].get();
12484 check_inplace(out, sizes, options);
12485 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12486 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12487 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12488 }
12489 if (!names.empty()) {
12490 namedinference::propagate_names(outputs_[output_idx], names);
12491 }
12492 // super must happen after, so that downstream can use maybe_get_output
12493 // to retrieve the output
12494 at::meta::structured_gt_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12495 }
12496 void set_output_raw_strided(
12497 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12498 TensorOptions options, DimnameList names
12499 ) override {
12500 auto current_device = guard_.current_device();
12501 if (C10_UNLIKELY(current_device.has_value())) {
12502 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12503 "structured kernels don't support multi-device outputs");
12504 } else {
12505 guard_.reset_device(options.device());
12506 }
12507 const auto& out = outputs_[output_idx].get();
12508 check_inplace(out, sizes, options);
12509 if (!names.empty()) {
12510 namedinference::propagate_names(outputs_[output_idx], names);
12511 }
12512 // super must happen after, so that downstream can use maybe_get_output
12513 // to retrieve the output
12514 at::meta::structured_gt_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12515 }
12516 const Tensor& maybe_get_output(int64_t output_idx) override {
12517 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12518 }
12519 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12520 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12521 c10::OptionalDeviceGuard guard_;
12522};
12523at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_gt__Tensor(at::Tensor & self, const at::Tensor & other) {
12524structured_gt_Tensor_default_backend_inplace op(self);
12525op.meta(self, other);
12526at::gt_outf(self, other, op.outputs_[0]);
12527if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12528return self;
12529}
12530struct structured_lt_Scalar_default_backend_functional final : public at::meta::structured_lt_Scalar {
12531 void set_output_strided(
12532 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12533 TensorOptions options, DimnameList names
12534 ) override {
12535 auto current_device = guard_.current_device();
12536 if (C10_UNLIKELY(current_device.has_value())) {
12537 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12538 "structured kernels don't support multi-device outputs");
12539 } else {
12540 guard_.reset_device(options.device());
12541 }
12542 outputs_[output_idx] = create_out(sizes, strides, options);
12543 if (!names.empty()) {
12544 namedinference::propagate_names(*outputs_[output_idx], names);
12545 }
12546 // super must happen after, so that downstream can use maybe_get_output
12547 // to retrieve the output
12548 at::meta::structured_lt_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12549 }
12550 void set_output_raw_strided(
12551 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12552 TensorOptions options, DimnameList names
12553 ) override {
12554 auto current_device = guard_.current_device();
12555 if (C10_UNLIKELY(current_device.has_value())) {
12556 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12557 "structured kernels don't support multi-device outputs");
12558 } else {
12559 guard_.reset_device(options.device());
12560 }
12561 outputs_[output_idx] = create_out(sizes, strides, options);
12562 if (!names.empty()) {
12563 namedinference::propagate_names(*outputs_[output_idx], names);
12564 }
12565 // super must happen after, so that downstream can use maybe_get_output
12566 // to retrieve the output
12567 at::meta::structured_lt_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12568 }
12569 const Tensor& maybe_get_output(int64_t output_idx) override {
12570 return *outputs_[output_idx];
12571 }
12572 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12573 c10::OptionalDeviceGuard guard_;
12574};
12575at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_lt_Scalar(const at::Tensor & self, const at::Scalar & other) {
12576structured_lt_Scalar_default_backend_functional op;
12577op.meta(self, other);
12578at::lt_outf(self, other, *op.outputs_[0]);
12579return std::move(op.outputs_[0]).take();
12580}
12581struct structured_lt_Scalar_default_backend_inplace final : public at::meta::structured_lt_Scalar {
12582 structured_lt_Scalar_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12583 void set_output_strided(
12584 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12585 TensorOptions options, DimnameList names
12586 ) override {
12587 auto current_device = guard_.current_device();
12588 if (C10_UNLIKELY(current_device.has_value())) {
12589 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12590 "structured kernels don't support multi-device outputs");
12591 } else {
12592 guard_.reset_device(options.device());
12593 }
12594 const auto& out = outputs_[output_idx].get();
12595 check_inplace(out, sizes, options);
12596 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12597 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12598 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12599 }
12600 if (!names.empty()) {
12601 namedinference::propagate_names(outputs_[output_idx], names);
12602 }
12603 // super must happen after, so that downstream can use maybe_get_output
12604 // to retrieve the output
12605 at::meta::structured_lt_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12606 }
12607 void set_output_raw_strided(
12608 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12609 TensorOptions options, DimnameList names
12610 ) override {
12611 auto current_device = guard_.current_device();
12612 if (C10_UNLIKELY(current_device.has_value())) {
12613 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12614 "structured kernels don't support multi-device outputs");
12615 } else {
12616 guard_.reset_device(options.device());
12617 }
12618 const auto& out = outputs_[output_idx].get();
12619 check_inplace(out, sizes, options);
12620 if (!names.empty()) {
12621 namedinference::propagate_names(outputs_[output_idx], names);
12622 }
12623 // super must happen after, so that downstream can use maybe_get_output
12624 // to retrieve the output
12625 at::meta::structured_lt_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
12626 }
12627 const Tensor& maybe_get_output(int64_t output_idx) override {
12628 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12629 }
12630 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12631 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12632 c10::OptionalDeviceGuard guard_;
12633};
12634at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_lt__Scalar(at::Tensor & self, const at::Scalar & other) {
12635structured_lt_Scalar_default_backend_inplace op(self);
12636op.meta(self, other);
12637at::lt_outf(self, other, op.outputs_[0]);
12638if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12639return self;
12640}
12641struct structured_lt_Tensor_default_backend_functional final : public at::meta::structured_lt_Tensor {
12642 void set_output_strided(
12643 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12644 TensorOptions options, DimnameList names
12645 ) override {
12646 auto current_device = guard_.current_device();
12647 if (C10_UNLIKELY(current_device.has_value())) {
12648 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12649 "structured kernels don't support multi-device outputs");
12650 } else {
12651 guard_.reset_device(options.device());
12652 }
12653 outputs_[output_idx] = create_out(sizes, strides, options);
12654 if (!names.empty()) {
12655 namedinference::propagate_names(*outputs_[output_idx], names);
12656 }
12657 // super must happen after, so that downstream can use maybe_get_output
12658 // to retrieve the output
12659 at::meta::structured_lt_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12660 }
12661 void set_output_raw_strided(
12662 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12663 TensorOptions options, DimnameList names
12664 ) override {
12665 auto current_device = guard_.current_device();
12666 if (C10_UNLIKELY(current_device.has_value())) {
12667 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12668 "structured kernels don't support multi-device outputs");
12669 } else {
12670 guard_.reset_device(options.device());
12671 }
12672 outputs_[output_idx] = create_out(sizes, strides, options);
12673 if (!names.empty()) {
12674 namedinference::propagate_names(*outputs_[output_idx], names);
12675 }
12676 // super must happen after, so that downstream can use maybe_get_output
12677 // to retrieve the output
12678 at::meta::structured_lt_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12679 }
12680 const Tensor& maybe_get_output(int64_t output_idx) override {
12681 return *outputs_[output_idx];
12682 }
12683 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12684 c10::OptionalDeviceGuard guard_;
12685};
12686at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_lt_Tensor(const at::Tensor & self, const at::Tensor & other) {
12687structured_lt_Tensor_default_backend_functional op;
12688op.meta(self, other);
12689at::lt_outf(self, other, *op.outputs_[0]);
12690return std::move(op.outputs_[0]).take();
12691}
12692struct structured_lt_Tensor_default_backend_inplace final : public at::meta::structured_lt_Tensor {
12693 structured_lt_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12694 void set_output_strided(
12695 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12696 TensorOptions options, DimnameList names
12697 ) override {
12698 auto current_device = guard_.current_device();
12699 if (C10_UNLIKELY(current_device.has_value())) {
12700 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12701 "structured kernels don't support multi-device outputs");
12702 } else {
12703 guard_.reset_device(options.device());
12704 }
12705 const auto& out = outputs_[output_idx].get();
12706 check_inplace(out, sizes, options);
12707 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12708 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12709 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12710 }
12711 if (!names.empty()) {
12712 namedinference::propagate_names(outputs_[output_idx], names);
12713 }
12714 // super must happen after, so that downstream can use maybe_get_output
12715 // to retrieve the output
12716 at::meta::structured_lt_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12717 }
12718 void set_output_raw_strided(
12719 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12720 TensorOptions options, DimnameList names
12721 ) override {
12722 auto current_device = guard_.current_device();
12723 if (C10_UNLIKELY(current_device.has_value())) {
12724 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12725 "structured kernels don't support multi-device outputs");
12726 } else {
12727 guard_.reset_device(options.device());
12728 }
12729 const auto& out = outputs_[output_idx].get();
12730 check_inplace(out, sizes, options);
12731 if (!names.empty()) {
12732 namedinference::propagate_names(outputs_[output_idx], names);
12733 }
12734 // super must happen after, so that downstream can use maybe_get_output
12735 // to retrieve the output
12736 at::meta::structured_lt_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
12737 }
12738 const Tensor& maybe_get_output(int64_t output_idx) override {
12739 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12740 }
12741 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12742 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12743 c10::OptionalDeviceGuard guard_;
12744};
12745at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_lt__Tensor(at::Tensor & self, const at::Tensor & other) {
12746structured_lt_Tensor_default_backend_inplace op(self);
12747op.meta(self, other);
12748at::lt_outf(self, other, op.outputs_[0]);
12749if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12750return self;
12751}
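// NOTE: for gather here (and triangular_solve / lu_unpack further below) the
// generated set_output_* methods stop at the "super must happen after" comment
// without a base-class set_output_raw_strided() call; presumably their meta
// classes do not require one. maybe_get_output() still exposes the allocated
// outputs to the meta/impl code.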
12752struct structured_gather_default_backend_functional final : public at::meta::structured_gather {
12753 void set_output_strided(
12754 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12755 TensorOptions options, DimnameList names
12756 ) override {
12757 auto current_device = guard_.current_device();
12758 if (C10_UNLIKELY(current_device.has_value())) {
12759 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12760 "structured kernels don't support multi-device outputs");
12761 } else {
12762 guard_.reset_device(options.device());
12763 }
12764 outputs_[output_idx] = create_out(sizes, strides, options);
12765 if (!names.empty()) {
12766 namedinference::propagate_names(*outputs_[output_idx], names);
12767 }
12768 // super must happen after, so that downstream can use maybe_get_output
12769 // to retrieve the output
12770 }
12771 void set_output_raw_strided(
12772 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12773 TensorOptions options, DimnameList names
12774 ) override {
12775 auto current_device = guard_.current_device();
12776 if (C10_UNLIKELY(current_device.has_value())) {
12777 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12778 "structured kernels don't support multi-device outputs");
12779 } else {
12780 guard_.reset_device(options.device());
12781 }
12782 outputs_[output_idx] = create_out(sizes, strides, options);
12783 if (!names.empty()) {
12784 namedinference::propagate_names(*outputs_[output_idx], names);
12785 }
12786 // super must happen after, so that downstream can use maybe_get_output
12787 // to retrieve the output
12788 }
12789 const Tensor& maybe_get_output(int64_t output_idx) override {
12790 return *outputs_[output_idx];
12791 }
12792 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12793 c10::OptionalDeviceGuard guard_;
12794};
12795at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
12796structured_gather_default_backend_functional op;
12797op.meta(self, dim, index, sparse_grad);
12798at::gather_outf(self, dim, index, sparse_grad, *op.outputs_[0]);
12799return std::move(op.outputs_[0]).take();
12800}
12801struct structured_addcmul_default_backend_functional final : public at::meta::structured_addcmul {
12802 void set_output_strided(
12803 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12804 TensorOptions options, DimnameList names
12805 ) override {
12806 auto current_device = guard_.current_device();
12807 if (C10_UNLIKELY(current_device.has_value())) {
12808 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12809 "structured kernels don't support multi-device outputs");
12810 } else {
12811 guard_.reset_device(options.device());
12812 }
12813 outputs_[output_idx] = create_out(sizes, strides, options);
12814 if (!names.empty()) {
12815 namedinference::propagate_names(*outputs_[output_idx], names);
12816 }
12817 // super must happen after, so that downstream can use maybe_get_output
12818 // to retrieve the output
12819 at::meta::structured_addcmul::set_output_raw_strided(output_idx, sizes, strides, options, names);
12820 }
12821 void set_output_raw_strided(
12822 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12823 TensorOptions options, DimnameList names
12824 ) override {
12825 auto current_device = guard_.current_device();
12826 if (C10_UNLIKELY(current_device.has_value())) {
12827 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12828 "structured kernels don't support multi-device outputs");
12829 } else {
12830 guard_.reset_device(options.device());
12831 }
12832 outputs_[output_idx] = create_out(sizes, strides, options);
12833 if (!names.empty()) {
12834 namedinference::propagate_names(*outputs_[output_idx], names);
12835 }
12836 // super must happen after, so that downstream can use maybe_get_output
12837 // to retrieve the output
12838 at::meta::structured_addcmul::set_output_raw_strided(output_idx, sizes, strides, options, names);
12839 }
12840 const Tensor& maybe_get_output(int64_t output_idx) override {
12841 return *outputs_[output_idx];
12842 }
12843 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12844 c10::OptionalDeviceGuard guard_;
12845};
12846at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
12847structured_addcmul_default_backend_functional op;
12848op.meta(self, tensor1, tensor2, value);
12849at::addcmul_outf(self, tensor1, tensor2, value, *op.outputs_[0]);
12850return std::move(op.outputs_[0]).take();
12851}
12852struct structured_addcmul_default_backend_inplace final : public at::meta::structured_addcmul {
12853 structured_addcmul_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12854 void set_output_strided(
12855 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12856 TensorOptions options, DimnameList names
12857 ) override {
12858 auto current_device = guard_.current_device();
12859 if (C10_UNLIKELY(current_device.has_value())) {
12860 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12861 "structured kernels don't support multi-device outputs");
12862 } else {
12863 guard_.reset_device(options.device());
12864 }
12865 const auto& out = outputs_[output_idx].get();
12866 check_inplace(out, sizes, options);
12867 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12868 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12869 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12870 }
12871 if (!names.empty()) {
12872 namedinference::propagate_names(outputs_[output_idx], names);
12873 }
12874 // super must happen after, so that downstream can use maybe_get_output
12875 // to retrieve the output
12876 at::meta::structured_addcmul::set_output_raw_strided(output_idx, sizes, strides, options, names);
12877 }
12878 void set_output_raw_strided(
12879 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12880 TensorOptions options, DimnameList names
12881 ) override {
12882 auto current_device = guard_.current_device();
12883 if (C10_UNLIKELY(current_device.has_value())) {
12884 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12885 "structured kernels don't support multi-device outputs");
12886 } else {
12887 guard_.reset_device(options.device());
12888 }
12889 const auto& out = outputs_[output_idx].get();
12890 check_inplace(out, sizes, options);
12891 if (!names.empty()) {
12892 namedinference::propagate_names(outputs_[output_idx], names);
12893 }
12894 // super must happen after, so that downstream can use maybe_get_output
12895 // to retrieve the output
12896 at::meta::structured_addcmul::set_output_raw_strided(output_idx, sizes, strides, options, names);
12897 }
12898 const Tensor& maybe_get_output(int64_t output_idx) override {
12899 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12900 }
12901 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12902 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12903 c10::OptionalDeviceGuard guard_;
12904};
12905at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
12906structured_addcmul_default_backend_inplace op(self);
12907op.meta(self, tensor1, tensor2, value);
12908at::addcmul_outf(self, tensor1, tensor2, value, op.outputs_[0]);
12909if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12910return self;
12911}
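// NOTE: ops with extra operands such as addcmul/addcdiv simply thread
// tensor1, tensor2, and value through op.meta(...) and the at::*_outf call
// unchanged; only the output handling differs between the functional and
// inplace variants.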
12912struct structured_addcdiv_default_backend_functional final : public at::meta::structured_addcdiv {
12913 void set_output_strided(
12914 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12915 TensorOptions options, DimnameList names
12916 ) override {
12917 auto current_device = guard_.current_device();
12918 if (C10_UNLIKELY(current_device.has_value())) {
12919 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12920 "structured kernels don't support multi-device outputs");
12921 } else {
12922 guard_.reset_device(options.device());
12923 }
12924 outputs_[output_idx] = create_out(sizes, strides, options);
12925 if (!names.empty()) {
12926 namedinference::propagate_names(*outputs_[output_idx], names);
12927 }
12928 // super must happen after, so that downstream can use maybe_get_output
12929 // to retrieve the output
12930 at::meta::structured_addcdiv::set_output_raw_strided(output_idx, sizes, strides, options, names);
12931 }
12932 void set_output_raw_strided(
12933 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12934 TensorOptions options, DimnameList names
12935 ) override {
12936 auto current_device = guard_.current_device();
12937 if (C10_UNLIKELY(current_device.has_value())) {
12938 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12939 "structured kernels don't support multi-device outputs");
12940 } else {
12941 guard_.reset_device(options.device());
12942 }
12943 outputs_[output_idx] = create_out(sizes, strides, options);
12944 if (!names.empty()) {
12945 namedinference::propagate_names(*outputs_[output_idx], names);
12946 }
12947 // super must happen after, so that downstream can use maybe_get_output
12948 // to retrieve the output
12949 at::meta::structured_addcdiv::set_output_raw_strided(output_idx, sizes, strides, options, names);
12950 }
12951 const Tensor& maybe_get_output(int64_t output_idx) override {
12952 return *outputs_[output_idx];
12953 }
12954 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
12955 c10::OptionalDeviceGuard guard_;
12956};
12957at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
12958structured_addcdiv_default_backend_functional op;
12959op.meta(self, tensor1, tensor2, value);
12960at::addcdiv_outf(self, tensor1, tensor2, value, *op.outputs_[0]);
12961return std::move(op.outputs_[0]).take();
12962}
12963struct structured_addcdiv_default_backend_inplace final : public at::meta::structured_addcdiv {
12964 structured_addcdiv_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12965 void set_output_strided(
12966 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12967 TensorOptions options, DimnameList names
12968 ) override {
12969 auto current_device = guard_.current_device();
12970 if (C10_UNLIKELY(current_device.has_value())) {
12971 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12972 "structured kernels don't support multi-device outputs");
12973 } else {
12974 guard_.reset_device(options.device());
12975 }
12976 const auto& out = outputs_[output_idx].get();
12977 check_inplace(out, sizes, options);
12978 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12979 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12980 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12981 }
12982 if (!names.empty()) {
12983 namedinference::propagate_names(outputs_[output_idx], names);
12984 }
12985 // super must happen after, so that downstream can use maybe_get_output
12986 // to retrieve the output
12987 at::meta::structured_addcdiv::set_output_raw_strided(output_idx, sizes, strides, options, names);
12988 }
12989 void set_output_raw_strided(
12990 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12991 TensorOptions options, DimnameList names
12992 ) override {
12993 auto current_device = guard_.current_device();
12994 if (C10_UNLIKELY(current_device.has_value())) {
12995 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
12996 "structured kernels don't support multi-device outputs");
12997 } else {
12998 guard_.reset_device(options.device());
12999 }
13000 const auto& out = outputs_[output_idx].get();
13001 check_inplace(out, sizes, options);
13002 if (!names.empty()) {
13003 namedinference::propagate_names(outputs_[output_idx], names);
13004 }
13005 // super must happen after, so that downstream can use maybe_get_output
13006 // to retrieve the output
13007 at::meta::structured_addcdiv::set_output_raw_strided(output_idx, sizes, strides, options, names);
13008 }
13009 const Tensor& maybe_get_output(int64_t output_idx) override {
13010 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13011 }
13012 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13013 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13014 c10::OptionalDeviceGuard guard_;
13015};
13016at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
13017structured_addcdiv_default_backend_inplace op(self);
13018op.meta(self, tensor1, tensor2, value);
13019at::addcdiv_outf(self, tensor1, tensor2, value, op.outputs_[0]);
13020if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13021return self;
13022}
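// NOTE: multi-output ops use the same scheme with a larger outputs_ array:
// triangular_solve below owns 2 ExclusivelyOwned outputs and lu_unpack owns 3,
// and their wrapper_* functions return the taken tensors packed in a
// ::std::tuple.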
13023struct structured_triangular_solve_default_backend_functional final : public at::meta::structured_triangular_solve {
13024 void set_output_strided(
13025 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13026 TensorOptions options, DimnameList names
13027 ) override {
13028 auto current_device = guard_.current_device();
13029 if (C10_UNLIKELY(current_device.has_value())) {
13030 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13031 "structured kernels don't support multi-device outputs");
13032 } else {
13033 guard_.reset_device(options.device());
13034 }
13035 outputs_[output_idx] = create_out(sizes, strides, options);
13036 if (!names.empty()) {
13037 namedinference::propagate_names(*outputs_[output_idx], names);
13038 }
13039 // super must happen after, so that downstream can use maybe_get_output
13040 // to retrieve the output
13041 }
13042 void set_output_raw_strided(
13043 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13044 TensorOptions options, DimnameList names
13045 ) override {
13046 auto current_device = guard_.current_device();
13047 if (C10_UNLIKELY(current_device.has_value())) {
13048 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13049 "structured kernels don't support multi-device outputs");
13050 } else {
13051 guard_.reset_device(options.device());
13052 }
13053 outputs_[output_idx] = create_out(sizes, strides, options);
13054 if (!names.empty()) {
13055 namedinference::propagate_names(*outputs_[output_idx], names);
13056 }
13057 // super must happen after, so that downstream can use maybe_get_output
13058 // to retrieve the output
13059 }
13060 const Tensor& maybe_get_output(int64_t output_idx) override {
13061 return *outputs_[output_idx];
13062 }
13063 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
13064 c10::OptionalDeviceGuard guard_;
13065};
13066::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_triangular_solve(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
13067structured_triangular_solve_default_backend_functional op;
13068op.meta(self, A, upper, transpose, unitriangular);
13069at::triangular_solve_outf(self, A, upper, transpose, unitriangular, *op.outputs_[0], *op.outputs_[1]);
13070return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
13071}
13072struct structured_lu_unpack_default_backend_functional final : public at::meta::structured_lu_unpack {
13073 void set_output_strided(
13074 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13075 TensorOptions options, DimnameList names
13076 ) override {
13077 auto current_device = guard_.current_device();
13078 if (C10_UNLIKELY(current_device.has_value())) {
13079 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13080 "structured kernels don't support multi-device outputs");
13081 } else {
13082 guard_.reset_device(options.device());
13083 }
13084 outputs_[output_idx] = create_out(sizes, strides, options);
13085 if (!names.empty()) {
13086 namedinference::propagate_names(*outputs_[output_idx], names);
13087 }
13088 // super must happen after, so that downstream can use maybe_get_output
13089 // to retrieve the output
13090 }
13091 void set_output_raw_strided(
13092 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13093 TensorOptions options, DimnameList names
13094 ) override {
13095 auto current_device = guard_.current_device();
13096 if (C10_UNLIKELY(current_device.has_value())) {
13097 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13098 "structured kernels don't support multi-device outputs");
13099 } else {
13100 guard_.reset_device(options.device());
13101 }
13102 outputs_[output_idx] = create_out(sizes, strides, options);
13103 if (!names.empty()) {
13104 namedinference::propagate_names(*outputs_[output_idx], names);
13105 }
13106 // super must happen after, so that downstream can use maybe_get_output
13107 // to retrieve the output
13108 }
13109 const Tensor& maybe_get_output(int64_t output_idx) override {
13110 return *outputs_[output_idx];
13111 }
13112 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
13113 c10::OptionalDeviceGuard guard_;
13114};
13115::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
13116structured_lu_unpack_default_backend_functional op;
13117op.meta(LU_data, LU_pivots, unpack_data, unpack_pivots);
13118at::lu_unpack_outf(LU_data, LU_pivots, unpack_data, unpack_pivots, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
13119return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
13120}
13121struct structured_lgamma_default_backend_functional final : public at::meta::structured_lgamma {
13122 void set_output_strided(
13123 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13124 TensorOptions options, DimnameList names
13125 ) override {
13126 auto current_device = guard_.current_device();
13127 if (C10_UNLIKELY(current_device.has_value())) {
13128 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13129 "structured kernels don't support multi-device outputs");
13130 } else {
13131 guard_.reset_device(options.device());
13132 }
13133 outputs_[output_idx] = create_out(sizes, strides, options);
13134 if (!names.empty()) {
13135 namedinference::propagate_names(*outputs_[output_idx], names);
13136 }
13137 // super must happen after, so that downstream can use maybe_get_output
13138 // to retrieve the output
13139 at::meta::structured_lgamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
13140 }
13141 void set_output_raw_strided(
13142 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13143 TensorOptions options, DimnameList names
13144 ) override {
13145 auto current_device = guard_.current_device();
13146 if (C10_UNLIKELY(current_device.has_value())) {
13147 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13148 "structured kernels don't support multi-device outputs");
13149 } else {
13150 guard_.reset_device(options.device());
13151 }
13152 outputs_[output_idx] = create_out(sizes, strides, options);
13153 if (!names.empty()) {
13154 namedinference::propagate_names(*outputs_[output_idx], names);
13155 }
13156 // super must happen after, so that downstream can use maybe_get_output
13157 // to retrieve the output
13158 at::meta::structured_lgamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
13159 }
13160 const Tensor& maybe_get_output(int64_t output_idx) override {
13161 return *outputs_[output_idx];
13162 }
13163 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13164 c10::OptionalDeviceGuard guard_;
13165};
13166at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_lgamma(const at::Tensor & self) {
13167structured_lgamma_default_backend_functional op;
13168op.meta(self);
13169at::lgamma_outf(self, *op.outputs_[0]);
13170return std::move(op.outputs_[0]).take();
13171}
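// NOTE: The structured_<op>_default_backend_inplace structs reuse the caller's
// tensor: outputs_ holds a reference to self, check_inplace verifies that self
// can hold the computed sizes/options, and maybe_create_proxy may allocate a
// temporary proxy output when self cannot be written directly with the
// requested sizes/strides; the wrapper copies the proxy back into self after
// the *_outf kernel runs.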
13172struct structured_lgamma_default_backend_inplace final : public at::meta::structured_lgamma {
13173 structured_lgamma_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13174 void set_output_strided(
13175 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13176 TensorOptions options, DimnameList names
13177 ) override {
13178 auto current_device = guard_.current_device();
13179 if (C10_UNLIKELY(current_device.has_value())) {
13180 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13181 "structured kernels don't support multi-device outputs");
13182 } else {
13183 guard_.reset_device(options.device());
13184 }
13185 const auto& out = outputs_[output_idx].get();
13186 check_inplace(out, sizes, options);
13187 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13188 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13189 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13190 }
13191 if (!names.empty()) {
13192 namedinference::propagate_names(outputs_[output_idx], names);
13193 }
13194 // super must happen after, so that downstream can use maybe_get_output
13195 // to retrieve the output
13196 at::meta::structured_lgamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
13197 }
13198 void set_output_raw_strided(
13199 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13200 TensorOptions options, DimnameList names
13201 ) override {
13202 auto current_device = guard_.current_device();
13203 if (C10_UNLIKELY(current_device.has_value())) {
13204 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13205 "structured kernels don't support multi-device outputs");
13206 } else {
13207 guard_.reset_device(options.device());
13208 }
13209 const auto& out = outputs_[output_idx].get();
13210 check_inplace(out, sizes, options);
13211 if (!names.empty()) {
13212 namedinference::propagate_names(outputs_[output_idx], names);
13213 }
13214 // super must happen after, so that downstream can use maybe_get_output
13215 // to retrieve the output
13216 at::meta::structured_lgamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
13217 }
13218 const Tensor& maybe_get_output(int64_t output_idx) override {
13219 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13220 }
13221 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13222 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13223 c10::OptionalDeviceGuard guard_;
13224};
13225at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_lgamma_(at::Tensor & self) {
13226structured_lgamma_default_backend_inplace op(self);
13227op.meta(self);
13228at::lgamma_outf(self, op.outputs_[0]);
13229if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13230return self;
13231}
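// The ops that follow repeat the two patterns above (a functional struct plus
// wrapper and, where an in-place variant is registered here, an inplace struct
// plus wrapper), differing only in the at::meta base class and the *_outf call
// that is forwarded to.
//
// Illustrative call path (a sketch, not part of the registration itself):
//   at::Tensor x = at::randn({4});
//   at::Tensor y = at::lgamma(x);  // may dispatch to wrapper_..._lgamma above
//   x.lgamma_();                   // may dispatch to wrapper_..._lgamma_ above
// when the dispatcher selects the CompositeExplicitAutogradNonFunctional key,
// with each wrapper lowering the call onto the corresponding out= kernel.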
13232struct structured_polygamma_default_backend_functional final : public at::meta::structured_polygamma {
13233 void set_output_strided(
13234 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13235 TensorOptions options, DimnameList names
13236 ) override {
13237 auto current_device = guard_.current_device();
13238 if (C10_UNLIKELY(current_device.has_value())) {
13239 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13240 "structured kernels don't support multi-device outputs");
13241 } else {
13242 guard_.reset_device(options.device());
13243 }
13244 outputs_[output_idx] = create_out(sizes, strides, options);
13245 if (!names.empty()) {
13246 namedinference::propagate_names(*outputs_[output_idx], names);
13247 }
13248 // super must happen after, so that downstream can use maybe_get_output
13249 // to retrieve the output
13250 at::meta::structured_polygamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
13251 }
13252 void set_output_raw_strided(
13253 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13254 TensorOptions options, DimnameList names
13255 ) override {
13256 auto current_device = guard_.current_device();
13257 if (C10_UNLIKELY(current_device.has_value())) {
13258 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13259 "structured kernels don't support multi-device outputs");
13260 } else {
13261 guard_.reset_device(options.device());
13262 }
13263 outputs_[output_idx] = create_out(sizes, strides, options);
13264 if (!names.empty()) {
13265 namedinference::propagate_names(*outputs_[output_idx], names);
13266 }
13267 // super must happen after, so that downstream can use maybe_get_output
13268 // to retrieve the output
13269 at::meta::structured_polygamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
13270 }
13271 const Tensor& maybe_get_output(int64_t output_idx) override {
13272 return *outputs_[output_idx];
13273 }
13274 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13275 c10::OptionalDeviceGuard guard_;
13276};
13277at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_polygamma(int64_t n, const at::Tensor & self) {
13278structured_polygamma_default_backend_functional op;
13279op.meta(n, self);
13280at::polygamma_outf(n, self, *op.outputs_[0]);
13281return std::move(op.outputs_[0]).take();
13282}
13283struct structured_erfinv_default_backend_functional final : public at::meta::structured_erfinv {
13284 void set_output_strided(
13285 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13286 TensorOptions options, DimnameList names
13287 ) override {
13288 auto current_device = guard_.current_device();
13289 if (C10_UNLIKELY(current_device.has_value())) {
13290 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13291 "structured kernels don't support multi-device outputs");
13292 } else {
13293 guard_.reset_device(options.device());
13294 }
13295 outputs_[output_idx] = create_out(sizes, strides, options);
13296 if (!names.empty()) {
13297 namedinference::propagate_names(*outputs_[output_idx], names);
13298 }
13299 // super must happen after, so that downstream can use maybe_get_output
13300 // to retrieve the output
13301 at::meta::structured_erfinv::set_output_raw_strided(output_idx, sizes, strides, options, names);
13302 }
13303 void set_output_raw_strided(
13304 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13305 TensorOptions options, DimnameList names
13306 ) override {
13307 auto current_device = guard_.current_device();
13308 if (C10_UNLIKELY(current_device.has_value())) {
13309 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13310 "structured kernels don't support multi-device outputs");
13311 } else {
13312 guard_.reset_device(options.device());
13313 }
13314 outputs_[output_idx] = create_out(sizes, strides, options);
13315 if (!names.empty()) {
13316 namedinference::propagate_names(*outputs_[output_idx], names);
13317 }
13318 // super must happen after, so that downstream can use maybe_get_output
13319 // to retrieve the output
13320 at::meta::structured_erfinv::set_output_raw_strided(output_idx, sizes, strides, options, names);
13321 }
13322 const Tensor& maybe_get_output(int64_t output_idx) override {
13323 return *outputs_[output_idx];
13324 }
13325 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13326 c10::OptionalDeviceGuard guard_;
13327};
13328at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_erfinv(const at::Tensor & self) {
13329structured_erfinv_default_backend_functional op;
13330op.meta(self);
13331at::erfinv_outf(self, *op.outputs_[0]);
13332return std::move(op.outputs_[0]).take();
13333}
13334struct structured_erfinv_default_backend_inplace final : public at::meta::structured_erfinv {
13335 structured_erfinv_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13336 void set_output_strided(
13337 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13338 TensorOptions options, DimnameList names
13339 ) override {
13340 auto current_device = guard_.current_device();
13341 if (C10_UNLIKELY(current_device.has_value())) {
13342 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13343 "structured kernels don't support multi-device outputs");
13344 } else {
13345 guard_.reset_device(options.device());
13346 }
13347 const auto& out = outputs_[output_idx].get();
13348 check_inplace(out, sizes, options);
13349 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13350 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13351 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13352 }
13353 if (!names.empty()) {
13354 namedinference::propagate_names(outputs_[output_idx], names);
13355 }
13356 // super must happen after, so that downstream can use maybe_get_output
13357 // to retrieve the output
13358 at::meta::structured_erfinv::set_output_raw_strided(output_idx, sizes, strides, options, names);
13359 }
13360 void set_output_raw_strided(
13361 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13362 TensorOptions options, DimnameList names
13363 ) override {
13364 auto current_device = guard_.current_device();
13365 if (C10_UNLIKELY(current_device.has_value())) {
13366 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13367 "structured kernels don't support multi-device outputs");
13368 } else {
13369 guard_.reset_device(options.device());
13370 }
13371 const auto& out = outputs_[output_idx].get();
13372 check_inplace(out, sizes, options);
13373 if (!names.empty()) {
13374 namedinference::propagate_names(outputs_[output_idx], names);
13375 }
13376 // super must happen after, so that downstream can use maybe_get_output
13377 // to retrieve the output
13378 at::meta::structured_erfinv::set_output_raw_strided(output_idx, sizes, strides, options, names);
13379 }
13380 const Tensor& maybe_get_output(int64_t output_idx) override {
13381 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13382 }
13383 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13384 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13385 c10::OptionalDeviceGuard guard_;
13386};
13387at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_erfinv_(at::Tensor & self) {
13388structured_erfinv_default_backend_inplace op(self);
13389op.meta(self);
13390at::erfinv_outf(self, op.outputs_[0]);
13391if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13392return self;
13393}
13394struct structured_i0_default_backend_functional final : public at::meta::structured_i0 {
13395 void set_output_strided(
13396 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13397 TensorOptions options, DimnameList names
13398 ) override {
13399 auto current_device = guard_.current_device();
13400 if (C10_UNLIKELY(current_device.has_value())) {
13401 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13402 "structured kernels don't support multi-device outputs");
13403 } else {
13404 guard_.reset_device(options.device());
13405 }
13406 outputs_[output_idx] = create_out(sizes, strides, options);
13407 if (!names.empty()) {
13408 namedinference::propagate_names(*outputs_[output_idx], names);
13409 }
13410 // super must happen after, so that downstream can use maybe_get_output
13411 // to retrieve the output
13412 at::meta::structured_i0::set_output_raw_strided(output_idx, sizes, strides, options, names);
13413 }
13414 void set_output_raw_strided(
13415 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13416 TensorOptions options, DimnameList names
13417 ) override {
13418 auto current_device = guard_.current_device();
13419 if (C10_UNLIKELY(current_device.has_value())) {
13420 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13421 "structured kernels don't support multi-device outputs");
13422 } else {
13423 guard_.reset_device(options.device());
13424 }
13425 outputs_[output_idx] = create_out(sizes, strides, options);
13426 if (!names.empty()) {
13427 namedinference::propagate_names(*outputs_[output_idx], names);
13428 }
13429 // super must happen after, so that downstream can use maybe_get_output
13430 // to retrieve the output
13431 at::meta::structured_i0::set_output_raw_strided(output_idx, sizes, strides, options, names);
13432 }
13433 const Tensor& maybe_get_output(int64_t output_idx) override {
13434 return *outputs_[output_idx];
13435 }
13436 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13437 c10::OptionalDeviceGuard guard_;
13438};
13439at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_i0(const at::Tensor & self) {
13440structured_i0_default_backend_functional op;
13441op.meta(self);
13442at::i0_outf(self, *op.outputs_[0]);
13443return std::move(op.outputs_[0]).take();
13444}
13445struct structured_i0_default_backend_inplace final : public at::meta::structured_i0 {
13446 structured_i0_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13447 void set_output_strided(
13448 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13449 TensorOptions options, DimnameList names
13450 ) override {
13451 auto current_device = guard_.current_device();
13452 if (C10_UNLIKELY(current_device.has_value())) {
13453 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13454 "structured kernels don't support multi-device outputs");
13455 } else {
13456 guard_.reset_device(options.device());
13457 }
13458 const auto& out = outputs_[output_idx].get();
13459 check_inplace(out, sizes, options);
13460 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13461 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13462 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13463 }
13464 if (!names.empty()) {
13465 namedinference::propagate_names(outputs_[output_idx], names);
13466 }
13467 // super must happen after, so that downstream can use maybe_get_output
13468 // to retrieve the output
13469 at::meta::structured_i0::set_output_raw_strided(output_idx, sizes, strides, options, names);
13470 }
13471 void set_output_raw_strided(
13472 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13473 TensorOptions options, DimnameList names
13474 ) override {
13475 auto current_device = guard_.current_device();
13476 if (C10_UNLIKELY(current_device.has_value())) {
13477 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13478 "structured kernels don't support multi-device outputs");
13479 } else {
13480 guard_.reset_device(options.device());
13481 }
13482 const auto& out = outputs_[output_idx].get();
13483 check_inplace(out, sizes, options);
13484 if (!names.empty()) {
13485 namedinference::propagate_names(outputs_[output_idx], names);
13486 }
13487 // super must happen after, so that downstream can use maybe_get_output
13488 // to retrieve the output
13489 at::meta::structured_i0::set_output_raw_strided(output_idx, sizes, strides, options, names);
13490 }
13491 const Tensor& maybe_get_output(int64_t output_idx) override {
13492 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13493 }
13494 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13495 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13496 c10::OptionalDeviceGuard guard_;
13497};
13498at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_i0_(at::Tensor & self) {
13499structured_i0_default_backend_inplace op(self);
13500op.meta(self);
13501at::i0_outf(self, op.outputs_[0]);
13502if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13503return self;
13504}
13505struct structured_sign_default_backend_functional final : public at::meta::structured_sign {
13506 void set_output_strided(
13507 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13508 TensorOptions options, DimnameList names
13509 ) override {
13510 auto current_device = guard_.current_device();
13511 if (C10_UNLIKELY(current_device.has_value())) {
13512 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13513 "structured kernels don't support multi-device outputs");
13514 } else {
13515 guard_.reset_device(options.device());
13516 }
13517 outputs_[output_idx] = create_out(sizes, strides, options);
13518 if (!names.empty()) {
13519 namedinference::propagate_names(*outputs_[output_idx], names);
13520 }
13521 // super must happen after, so that downstream can use maybe_get_output
13522 // to retrieve the output
13523 at::meta::structured_sign::set_output_raw_strided(output_idx, sizes, strides, options, names);
13524 }
13525 void set_output_raw_strided(
13526 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13527 TensorOptions options, DimnameList names
13528 ) override {
13529 auto current_device = guard_.current_device();
13530 if (C10_UNLIKELY(current_device.has_value())) {
13531 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13532 "structured kernels don't support multi-device outputs");
13533 } else {
13534 guard_.reset_device(options.device());
13535 }
13536 outputs_[output_idx] = create_out(sizes, strides, options);
13537 if (!names.empty()) {
13538 namedinference::propagate_names(*outputs_[output_idx], names);
13539 }
13540 // super must happen after, so that downstream can use maybe_get_output
13541 // to retrieve the output
13542 at::meta::structured_sign::set_output_raw_strided(output_idx, sizes, strides, options, names);
13543 }
13544 const Tensor& maybe_get_output(int64_t output_idx) override {
13545 return *outputs_[output_idx];
13546 }
13547 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13548 c10::OptionalDeviceGuard guard_;
13549};
13550at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sign(const at::Tensor & self) {
13551structured_sign_default_backend_functional op;
13552op.meta(self);
13553at::sign_outf(self, *op.outputs_[0]);
13554return std::move(op.outputs_[0]).take();
13555}
13556struct structured_sign_default_backend_inplace final : public at::meta::structured_sign {
13557 structured_sign_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13558 void set_output_strided(
13559 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13560 TensorOptions options, DimnameList names
13561 ) override {
13562 auto current_device = guard_.current_device();
13563 if (C10_UNLIKELY(current_device.has_value())) {
13564 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13565 "structured kernels don't support multi-device outputs");
13566 } else {
13567 guard_.reset_device(options.device());
13568 }
13569 const auto& out = outputs_[output_idx].get();
13570 check_inplace(out, sizes, options);
13571 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13572 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13573 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13574 }
13575 if (!names.empty()) {
13576 namedinference::propagate_names(outputs_[output_idx], names);
13577 }
13578 // super must happen after, so that downstream can use maybe_get_output
13579 // to retrieve the output
13580 at::meta::structured_sign::set_output_raw_strided(output_idx, sizes, strides, options, names);
13581 }
13582 void set_output_raw_strided(
13583 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13584 TensorOptions options, DimnameList names
13585 ) override {
13586 auto current_device = guard_.current_device();
13587 if (C10_UNLIKELY(current_device.has_value())) {
13588 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13589 "structured kernels don't support multi-device outputs");
13590 } else {
13591 guard_.reset_device(options.device());
13592 }
13593 const auto& out = outputs_[output_idx].get();
13594 check_inplace(out, sizes, options);
13595 if (!names.empty()) {
13596 namedinference::propagate_names(outputs_[output_idx], names);
13597 }
13598 // super must happen after, so that downstream can use maybe_get_output
13599 // to retrieve the output
13600 at::meta::structured_sign::set_output_raw_strided(output_idx, sizes, strides, options, names);
13601 }
13602 const Tensor& maybe_get_output(int64_t output_idx) override {
13603 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13604 }
13605 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13606 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13607 c10::OptionalDeviceGuard guard_;
13608};
13609at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_sign_(at::Tensor & self) {
13610structured_sign_default_backend_inplace op(self);
13611op.meta(self);
13612at::sign_outf(self, op.outputs_[0]);
13613if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13614return self;
13615}
13616struct structured_signbit_default_backend_functional final : public at::meta::structured_signbit {
13617 void set_output_strided(
13618 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13619 TensorOptions options, DimnameList names
13620 ) override {
13621 auto current_device = guard_.current_device();
13622 if (C10_UNLIKELY(current_device.has_value())) {
13623 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13624 "structured kernels don't support multi-device outputs");
13625 } else {
13626 guard_.reset_device(options.device());
13627 }
13628 outputs_[output_idx] = create_out(sizes, strides, options);
13629 if (!names.empty()) {
13630 namedinference::propagate_names(*outputs_[output_idx], names);
13631 }
13632 // super must happen after, so that downstream can use maybe_get_output
13633 // to retrieve the output
13634 at::meta::structured_signbit::set_output_raw_strided(output_idx, sizes, strides, options, names);
13635 }
13636 void set_output_raw_strided(
13637 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13638 TensorOptions options, DimnameList names
13639 ) override {
13640 auto current_device = guard_.current_device();
13641 if (C10_UNLIKELY(current_device.has_value())) {
13642 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13643 "structured kernels don't support multi-device outputs");
13644 } else {
13645 guard_.reset_device(options.device());
13646 }
13647 outputs_[output_idx] = create_out(sizes, strides, options);
13648 if (!names.empty()) {
13649 namedinference::propagate_names(*outputs_[output_idx], names);
13650 }
13651 // super must happen after, so that downstream can use maybe_get_output
13652 // to retrieve the output
13653 at::meta::structured_signbit::set_output_raw_strided(output_idx, sizes, strides, options, names);
13654 }
13655 const Tensor& maybe_get_output(int64_t output_idx) override {
13656 return *outputs_[output_idx];
13657 }
13658 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13659 c10::OptionalDeviceGuard guard_;
13660};
13661at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_signbit(const at::Tensor & self) {
13662structured_signbit_default_backend_functional op;
13663op.meta(self);
13664at::signbit_outf(self, *op.outputs_[0]);
13665return std::move(op.outputs_[0]).take();
13666}
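// NOTE: signbit produces a bool tensor, so only a functional wrapper is
// registered here; the op has no in-place variant.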
13667struct structured_atan2_default_backend_functional final : public at::meta::structured_atan2 {
13668 void set_output_strided(
13669 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13670 TensorOptions options, DimnameList names
13671 ) override {
13672 auto current_device = guard_.current_device();
13673 if (C10_UNLIKELY(current_device.has_value())) {
13674 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13675 "structured kernels don't support multi-device outputs");
13676 } else {
13677 guard_.reset_device(options.device());
13678 }
13679 outputs_[output_idx] = create_out(sizes, strides, options);
13680 if (!names.empty()) {
13681 namedinference::propagate_names(*outputs_[output_idx], names);
13682 }
13683 // super must happen after, so that downstream can use maybe_get_output
13684 // to retrieve the output
13685 at::meta::structured_atan2::set_output_raw_strided(output_idx, sizes, strides, options, names);
13686 }
13687 void set_output_raw_strided(
13688 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13689 TensorOptions options, DimnameList names
13690 ) override {
13691 auto current_device = guard_.current_device();
13692 if (C10_UNLIKELY(current_device.has_value())) {
13693 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13694 "structured kernels don't support multi-device outputs");
13695 } else {
13696 guard_.reset_device(options.device());
13697 }
13698 outputs_[output_idx] = create_out(sizes, strides, options);
13699 if (!names.empty()) {
13700 namedinference::propagate_names(*outputs_[output_idx], names);
13701 }
13702 // super must happen after, so that downstream can use maybe_get_output
13703 // to retrieve the output
13704 at::meta::structured_atan2::set_output_raw_strided(output_idx, sizes, strides, options, names);
13705 }
13706 const Tensor& maybe_get_output(int64_t output_idx) override {
13707 return *outputs_[output_idx];
13708 }
13709 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13710 c10::OptionalDeviceGuard guard_;
13711};
13712at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_atan2(const at::Tensor & self, const at::Tensor & other) {
13713structured_atan2_default_backend_functional op;
13714op.meta(self, other);
13715at::atan2_outf(self, other, *op.outputs_[0]);
13716return std::move(op.outputs_[0]).take();
13717}
13718struct structured_atan2_default_backend_inplace final : public at::meta::structured_atan2 {
13719 structured_atan2_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13720 void set_output_strided(
13721 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13722 TensorOptions options, DimnameList names
13723 ) override {
13724 auto current_device = guard_.current_device();
13725 if (C10_UNLIKELY(current_device.has_value())) {
13726 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13727 "structured kernels don't support multi-device outputs");
13728 } else {
13729 guard_.reset_device(options.device());
13730 }
13731 const auto& out = outputs_[output_idx].get();
13732 check_inplace(out, sizes, options);
13733 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13734 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13735 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13736 }
13737 if (!names.empty()) {
13738 namedinference::propagate_names(outputs_[output_idx], names);
13739 }
13740 // super must happen after, so that downstream can use maybe_get_output
13741 // to retrieve the output
13742 at::meta::structured_atan2::set_output_raw_strided(output_idx, sizes, strides, options, names);
13743 }
13744 void set_output_raw_strided(
13745 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13746 TensorOptions options, DimnameList names
13747 ) override {
13748 auto current_device = guard_.current_device();
13749 if (C10_UNLIKELY(current_device.has_value())) {
13750 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13751 "structured kernels don't support multi-device outputs");
13752 } else {
13753 guard_.reset_device(options.device());
13754 }
13755 const auto& out = outputs_[output_idx].get();
13756 check_inplace(out, sizes, options);
13757 if (!names.empty()) {
13758 namedinference::propagate_names(outputs_[output_idx], names);
13759 }
13760 // super must happen after, so that downstream can use maybe_get_output
13761 // to retrieve the output
13762 at::meta::structured_atan2::set_output_raw_strided(output_idx, sizes, strides, options, names);
13763 }
13764 const Tensor& maybe_get_output(int64_t output_idx) override {
13765 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13766 }
13767 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13768 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13769 c10::OptionalDeviceGuard guard_;
13770};
13771at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_atan2_(at::Tensor & self, const at::Tensor & other) {
13772structured_atan2_default_backend_inplace op(self);
13773op.meta(self, other);
13774at::atan2_outf(self, other, op.outputs_[0]);
13775if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13776return self;
13777}
13778struct structured_fmod_Tensor_default_backend_functional final : public at::meta::structured_fmod_Tensor {
13779 void set_output_strided(
13780 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13781 TensorOptions options, DimnameList names
13782 ) override {
13783 auto current_device = guard_.current_device();
13784 if (C10_UNLIKELY(current_device.has_value())) {
13785 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13786 "structured kernels don't support multi-device outputs");
13787 } else {
13788 guard_.reset_device(options.device());
13789 }
13790 outputs_[output_idx] = create_out(sizes, strides, options);
13791 if (!names.empty()) {
13792 namedinference::propagate_names(*outputs_[output_idx], names);
13793 }
13794 // super must happen after, so that downstream can use maybe_get_output
13795 // to retrieve the output
13796 at::meta::structured_fmod_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
13797 }
13798 void set_output_raw_strided(
13799 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13800 TensorOptions options, DimnameList names
13801 ) override {
13802 auto current_device = guard_.current_device();
13803 if (C10_UNLIKELY(current_device.has_value())) {
13804 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13805 "structured kernels don't support multi-device outputs");
13806 } else {
13807 guard_.reset_device(options.device());
13808 }
13809 outputs_[output_idx] = create_out(sizes, strides, options);
13810 if (!names.empty()) {
13811 namedinference::propagate_names(*outputs_[output_idx], names);
13812 }
13813 // super must happen after, so that downstream can use maybe_get_output
13814 // to retrieve the output
13815 at::meta::structured_fmod_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
13816 }
13817 const Tensor& maybe_get_output(int64_t output_idx) override {
13818 return *outputs_[output_idx];
13819 }
13820 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13821 c10::OptionalDeviceGuard guard_;
13822};
13823at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_fmod_Tensor(const at::Tensor & self, const at::Tensor & other) {
13824structured_fmod_Tensor_default_backend_functional op;
13825op.meta(self, other);
13826at::fmod_outf(self, other, *op.outputs_[0]);
13827return std::move(op.outputs_[0]).take();
13828}
13829struct structured_fmod_Tensor_default_backend_inplace final : public at::meta::structured_fmod_Tensor {
13830 structured_fmod_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13831 void set_output_strided(
13832 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13833 TensorOptions options, DimnameList names
13834 ) override {
13835 auto current_device = guard_.current_device();
13836 if (C10_UNLIKELY(current_device.has_value())) {
13837 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13838 "structured kernels don't support multi-device outputs");
13839 } else {
13840 guard_.reset_device(options.device());
13841 }
13842 const auto& out = outputs_[output_idx].get();
13843 check_inplace(out, sizes, options);
13844 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13845 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13846 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13847 }
13848 if (!names.empty()) {
13849 namedinference::propagate_names(outputs_[output_idx], names);
13850 }
13851 // super must happen after, so that downstream can use maybe_get_output
13852 // to retrieve the output
13853 at::meta::structured_fmod_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
13854 }
13855 void set_output_raw_strided(
13856 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13857 TensorOptions options, DimnameList names
13858 ) override {
13859 auto current_device = guard_.current_device();
13860 if (C10_UNLIKELY(current_device.has_value())) {
13861 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13862 "structured kernels don't support multi-device outputs");
13863 } else {
13864 guard_.reset_device(options.device());
13865 }
13866 const auto& out = outputs_[output_idx].get();
13867 check_inplace(out, sizes, options);
13868 if (!names.empty()) {
13869 namedinference::propagate_names(outputs_[output_idx], names);
13870 }
13871 // super must happen after, so that downstream can use maybe_get_output
13872 // to retrieve the output
13873 at::meta::structured_fmod_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
13874 }
13875 const Tensor& maybe_get_output(int64_t output_idx) override {
13876 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13877 }
13878 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13879 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13880 c10::OptionalDeviceGuard guard_;
13881};
13882at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_fmod__Tensor(at::Tensor & self, const at::Tensor & other) {
13883structured_fmod_Tensor_default_backend_inplace op(self);
13884op.meta(self, other);
13885at::fmod_outf(self, other, op.outputs_[0]);
13886if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13887return self;
13888}
13889struct structured_hypot_default_backend_functional final : public at::meta::structured_hypot {
13890 void set_output_strided(
13891 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13892 TensorOptions options, DimnameList names
13893 ) override {
13894 auto current_device = guard_.current_device();
13895 if (C10_UNLIKELY(current_device.has_value())) {
13896 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13897 "structured kernels don't support multi-device outputs");
13898 } else {
13899 guard_.reset_device(options.device());
13900 }
13901 outputs_[output_idx] = create_out(sizes, strides, options);
13902 if (!names.empty()) {
13903 namedinference::propagate_names(*outputs_[output_idx], names);
13904 }
13905 // super must happen after, so that downstream can use maybe_get_output
13906 // to retrieve the output
13907 at::meta::structured_hypot::set_output_raw_strided(output_idx, sizes, strides, options, names);
13908 }
13909 void set_output_raw_strided(
13910 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13911 TensorOptions options, DimnameList names
13912 ) override {
13913 auto current_device = guard_.current_device();
13914 if (C10_UNLIKELY(current_device.has_value())) {
13915 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13916 "structured kernels don't support multi-device outputs");
13917 } else {
13918 guard_.reset_device(options.device());
13919 }
13920 outputs_[output_idx] = create_out(sizes, strides, options);
13921 if (!names.empty()) {
13922 namedinference::propagate_names(*outputs_[output_idx], names);
13923 }
13924 // super must happen after, so that downstream can use maybe_get_output
13925 // to retrieve the output
13926 at::meta::structured_hypot::set_output_raw_strided(output_idx, sizes, strides, options, names);
13927 }
13928 const Tensor& maybe_get_output(int64_t output_idx) override {
13929 return *outputs_[output_idx];
13930 }
13931 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13932 c10::OptionalDeviceGuard guard_;
13933};
13934at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_hypot(const at::Tensor & self, const at::Tensor & other) {
13935structured_hypot_default_backend_functional op;
13936op.meta(self, other);
13937at::hypot_outf(self, other, *op.outputs_[0]);
13938return std::move(op.outputs_[0]).take();
13939}
13940struct structured_hypot_default_backend_inplace final : public at::meta::structured_hypot {
13941 structured_hypot_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13942 void set_output_strided(
13943 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13944 TensorOptions options, DimnameList names
13945 ) override {
13946 auto current_device = guard_.current_device();
13947 if (C10_UNLIKELY(current_device.has_value())) {
13948 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13949 "structured kernels don't support multi-device outputs");
13950 } else {
13951 guard_.reset_device(options.device());
13952 }
13953 const auto& out = outputs_[output_idx].get();
13954 check_inplace(out, sizes, options);
13955 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13956 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13957 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13958 }
13959 if (!names.empty()) {
13960 namedinference::propagate_names(outputs_[output_idx], names);
13961 }
13962 // super must happen after, so that downstream can use maybe_get_output
13963 // to retrieve the output
13964 at::meta::structured_hypot::set_output_raw_strided(output_idx, sizes, strides, options, names);
13965 }
13966 void set_output_raw_strided(
13967 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13968 TensorOptions options, DimnameList names
13969 ) override {
13970 auto current_device = guard_.current_device();
13971 if (C10_UNLIKELY(current_device.has_value())) {
13972 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
13973 "structured kernels don't support multi-device outputs");
13974 } else {
13975 guard_.reset_device(options.device());
13976 }
13977 const auto& out = outputs_[output_idx].get();
13978 check_inplace(out, sizes, options);
13979 if (!names.empty()) {
13980 namedinference::propagate_names(outputs_[output_idx], names);
13981 }
13982 // super must happen after, so that downstream can use maybe_get_output
13983 // to retrieve the output
13984 at::meta::structured_hypot::set_output_raw_strided(output_idx, sizes, strides, options, names);
13985 }
13986 const Tensor& maybe_get_output(int64_t output_idx) override {
13987 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13988 }
13989 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13990 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13991 c10::OptionalDeviceGuard guard_;
13992};
13993at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_hypot_(at::Tensor & self, const at::Tensor & other) {
13994structured_hypot_default_backend_inplace op(self);
13995op.meta(self, other);
13996at::hypot_outf(self, other, op.outputs_[0]);
13997if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13998return self;
13999}
14000struct structured_igamma_default_backend_functional final : public at::meta::structured_igamma {
14001 void set_output_strided(
14002 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14003 TensorOptions options, DimnameList names
14004 ) override {
14005 auto current_device = guard_.current_device();
14006 if (C10_UNLIKELY(current_device.has_value())) {
14007 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14008 "structured kernels don't support multi-device outputs");
14009 } else {
14010 guard_.reset_device(options.device());
14011 }
14012 outputs_[output_idx] = create_out(sizes, strides, options);
14013 if (!names.empty()) {
14014 namedinference::propagate_names(*outputs_[output_idx], names);
14015 }
14016 // super must happen after, so that downstream can use maybe_get_output
14017 // to retrieve the output
14018 at::meta::structured_igamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
14019 }
14020 void set_output_raw_strided(
14021 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14022 TensorOptions options, DimnameList names
14023 ) override {
14024 auto current_device = guard_.current_device();
14025 if (C10_UNLIKELY(current_device.has_value())) {
14026 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14027 "structured kernels don't support multi-device outputs");
14028 } else {
14029 guard_.reset_device(options.device());
14030 }
14031 outputs_[output_idx] = create_out(sizes, strides, options);
14032 if (!names.empty()) {
14033 namedinference::propagate_names(*outputs_[output_idx], names);
14034 }
14035 // super must happen after, so that downstream can use maybe_get_output
14036 // to retrieve the output
14037 at::meta::structured_igamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
14038 }
14039 const Tensor& maybe_get_output(int64_t output_idx) override {
14040 return *outputs_[output_idx];
14041 }
14042 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14043 c10::OptionalDeviceGuard guard_;
14044};
14045at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_igamma(const at::Tensor & self, const at::Tensor & other) {
14046structured_igamma_default_backend_functional op;
14047op.meta(self, other);
14048at::igamma_outf(self, other, *op.outputs_[0]);
14049return std::move(op.outputs_[0]).take();
14050}
14051struct structured_igamma_default_backend_inplace final : public at::meta::structured_igamma {
14052 structured_igamma_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14053 void set_output_strided(
14054 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14055 TensorOptions options, DimnameList names
14056 ) override {
14057 auto current_device = guard_.current_device();
14058 if (C10_UNLIKELY(current_device.has_value())) {
14059 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14060 "structured kernels don't support multi-device outputs");
14061 } else {
14062 guard_.reset_device(options.device());
14063 }
14064 const auto& out = outputs_[output_idx].get();
14065 check_inplace(out, sizes, options);
14066 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14067 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14068 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14069 }
14070 if (!names.empty()) {
14071 namedinference::propagate_names(outputs_[output_idx], names);
14072 }
14073 // super must happen after, so that downstream can use maybe_get_output
14074 // to retrieve the output
14075 at::meta::structured_igamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
14076 }
14077 void set_output_raw_strided(
14078 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14079 TensorOptions options, DimnameList names
14080 ) override {
14081 auto current_device = guard_.current_device();
14082 if (C10_UNLIKELY(current_device.has_value())) {
14083 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14084 "structured kernels don't support multi-device outputs");
14085 } else {
14086 guard_.reset_device(options.device());
14087 }
14088 const auto& out = outputs_[output_idx].get();
14089 check_inplace(out, sizes, options);
14090 if (!names.empty()) {
14091 namedinference::propagate_names(outputs_[output_idx], names);
14092 }
14093 // super must happen after, so that downstream can use maybe_get_output
14094 // to retrieve the output
14095 at::meta::structured_igamma::set_output_raw_strided(output_idx, sizes, strides, options, names);
14096 }
14097 const Tensor& maybe_get_output(int64_t output_idx) override {
14098 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14099 }
14100 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14101 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14102 c10::OptionalDeviceGuard guard_;
14103};
14104at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_igamma_(at::Tensor & self, const at::Tensor & other) {
14105structured_igamma_default_backend_inplace op(self);
14106op.meta(self, other);
14107at::igamma_outf(self, other, op.outputs_[0]);
14108if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14109return self;
14110}
14111struct structured_igammac_default_backend_functional final : public at::meta::structured_igammac {
14112 void set_output_strided(
14113 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14114 TensorOptions options, DimnameList names
14115 ) override {
14116 auto current_device = guard_.current_device();
14117 if (C10_UNLIKELY(current_device.has_value())) {
14118 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14119 "structured kernels don't support multi-device outputs");
14120 } else {
14121 guard_.reset_device(options.device());
14122 }
14123 outputs_[output_idx] = create_out(sizes, strides, options);
14124 if (!names.empty()) {
14125 namedinference::propagate_names(*outputs_[output_idx], names);
14126 }
14127 // super must happen after, so that downstream can use maybe_get_output
14128 // to retrieve the output
14129 at::meta::structured_igammac::set_output_raw_strided(output_idx, sizes, strides, options, names);
14130 }
14131 void set_output_raw_strided(
14132 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14133 TensorOptions options, DimnameList names
14134 ) override {
14135 auto current_device = guard_.current_device();
14136 if (C10_UNLIKELY(current_device.has_value())) {
14137 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14138 "structured kernels don't support multi-device outputs");
14139 } else {
14140 guard_.reset_device(options.device());
14141 }
14142 outputs_[output_idx] = create_out(sizes, strides, options);
14143 if (!names.empty()) {
14144 namedinference::propagate_names(*outputs_[output_idx], names);
14145 }
14146 // super must happen after, so that downstream can use maybe_get_output
14147 // to retrieve the output
14148 at::meta::structured_igammac::set_output_raw_strided(output_idx, sizes, strides, options, names);
14149 }
14150 const Tensor& maybe_get_output(int64_t output_idx) override {
14151 return *outputs_[output_idx];
14152 }
14153 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14154 c10::OptionalDeviceGuard guard_;
14155};
14156at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_igammac(const at::Tensor & self, const at::Tensor & other) {
14157structured_igammac_default_backend_functional op;
14158op.meta(self, other);
14159at::igammac_outf(self, other, *op.outputs_[0]);
14160return std::move(op.outputs_[0]).take();
14161}
14162struct structured_igammac_default_backend_inplace final : public at::meta::structured_igammac {
14163 structured_igammac_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14164 void set_output_strided(
14165 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14166 TensorOptions options, DimnameList names
14167 ) override {
14168 auto current_device = guard_.current_device();
14169 if (C10_UNLIKELY(current_device.has_value())) {
14170 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14171 "structured kernels don't support multi-device outputs");
14172 } else {
14173 guard_.reset_device(options.device());
14174 }
14175 const auto& out = outputs_[output_idx].get();
14176 check_inplace(out, sizes, options);
14177 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14178 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14179 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14180 }
14181 if (!names.empty()) {
14182 namedinference::propagate_names(outputs_[output_idx], names);
14183 }
14184 // super must happen after, so that downstream can use maybe_get_output
14185 // to retrieve the output
14186 at::meta::structured_igammac::set_output_raw_strided(output_idx, sizes, strides, options, names);
14187 }
14188 void set_output_raw_strided(
14189 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14190 TensorOptions options, DimnameList names
14191 ) override {
14192 auto current_device = guard_.current_device();
14193 if (C10_UNLIKELY(current_device.has_value())) {
14194 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14195 "structured kernels don't support multi-device outputs");
14196 } else {
14197 guard_.reset_device(options.device());
14198 }
14199 const auto& out = outputs_[output_idx].get();
14200 check_inplace(out, sizes, options);
14201 if (!names.empty()) {
14202 namedinference::propagate_names(outputs_[output_idx], names);
14203 }
14204 // super must happen after, so that downstream can use maybe_get_output
14205 // to retrieve the output
14206 at::meta::structured_igammac::set_output_raw_strided(output_idx, sizes, strides, options, names);
14207 }
14208 const Tensor& maybe_get_output(int64_t output_idx) override {
14209 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14210 }
14211 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14212 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14213 c10::OptionalDeviceGuard guard_;
14214};
14215at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_igammac_(at::Tensor & self, const at::Tensor & other) {
14216structured_igammac_default_backend_inplace op(self);
14217op.meta(self, other);
14218at::igammac_outf(self, other, op.outputs_[0]);
14219if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14220return self;
14221}
14222struct structured_nextafter_default_backend_functional final : public at::meta::structured_nextafter {
14223 void set_output_strided(
14224 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14225 TensorOptions options, DimnameList names
14226 ) override {
14227 auto current_device = guard_.current_device();
14228 if (C10_UNLIKELY(current_device.has_value())) {
14229 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14230 "structured kernels don't support multi-device outputs");
14231 } else {
14232 guard_.reset_device(options.device());
14233 }
14234 outputs_[output_idx] = create_out(sizes, strides, options);
14235 if (!names.empty()) {
14236 namedinference::propagate_names(*outputs_[output_idx], names);
14237 }
14238 // super must happen after, so that downstream can use maybe_get_output
14239 // to retrieve the output
14240 at::meta::structured_nextafter::set_output_raw_strided(output_idx, sizes, strides, options, names);
14241 }
14242 void set_output_raw_strided(
14243 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14244 TensorOptions options, DimnameList names
14245 ) override {
14246 auto current_device = guard_.current_device();
14247 if (C10_UNLIKELY(current_device.has_value())) {
14248 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14249 "structured kernels don't support multi-device outputs");
14250 } else {
14251 guard_.reset_device(options.device());
14252 }
14253 outputs_[output_idx] = create_out(sizes, strides, options);
14254 if (!names.empty()) {
14255 namedinference::propagate_names(*outputs_[output_idx], names);
14256 }
14257 // super must happen after, so that downstream can use maybe_get_output
14258 // to retrieve the output
14259 at::meta::structured_nextafter::set_output_raw_strided(output_idx, sizes, strides, options, names);
14260 }
14261 const Tensor& maybe_get_output(int64_t output_idx) override {
14262 return *outputs_[output_idx];
14263 }
14264 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14265 c10::OptionalDeviceGuard guard_;
14266};
14267at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_nextafter(const at::Tensor & self, const at::Tensor & other) {
14268structured_nextafter_default_backend_functional op;
14269op.meta(self, other);
14270at::nextafter_outf(self, other, *op.outputs_[0]);
14271return std::move(op.outputs_[0]).take();
14272}
14273struct structured_nextafter_default_backend_inplace final : public at::meta::structured_nextafter {
14274 structured_nextafter_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14275 void set_output_strided(
14276 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14277 TensorOptions options, DimnameList names
14278 ) override {
14279 auto current_device = guard_.current_device();
14280 if (C10_UNLIKELY(current_device.has_value())) {
14281 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14282 "structured kernels don't support multi-device outputs");
14283 } else {
14284 guard_.reset_device(options.device());
14285 }
14286 const auto& out = outputs_[output_idx].get();
14287 check_inplace(out, sizes, options);
14288 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14289 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14290 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14291 }
14292 if (!names.empty()) {
14293 namedinference::propagate_names(outputs_[output_idx], names);
14294 }
14295 // super must happen after, so that downstream can use maybe_get_output
14296 // to retrieve the output
14297 at::meta::structured_nextafter::set_output_raw_strided(output_idx, sizes, strides, options, names);
14298 }
14299 void set_output_raw_strided(
14300 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14301 TensorOptions options, DimnameList names
14302 ) override {
14303 auto current_device = guard_.current_device();
14304 if (C10_UNLIKELY(current_device.has_value())) {
14305 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14306 "structured kernels don't support multi-device outputs");
14307 } else {
14308 guard_.reset_device(options.device());
14309 }
14310 const auto& out = outputs_[output_idx].get();
14311 check_inplace(out, sizes, options);
14312 if (!names.empty()) {
14313 namedinference::propagate_names(outputs_[output_idx], names);
14314 }
14315 // super must happen after, so that downstream can use maybe_get_output
14316 // to retrieve the output
14317 at::meta::structured_nextafter::set_output_raw_strided(output_idx, sizes, strides, options, names);
14318 }
14319 const Tensor& maybe_get_output(int64_t output_idx) override {
14320 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14321 }
14322 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14323 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14324 c10::OptionalDeviceGuard guard_;
14325};
14326at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_nextafter_(at::Tensor & self, const at::Tensor & other) {
14327structured_nextafter_default_backend_inplace op(self);
14328op.meta(self, other);
14329at::nextafter_outf(self, other, op.outputs_[0]);
14330if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14331return self;
14332}
14333struct structured_remainder_Tensor_default_backend_functional final : public at::meta::structured_remainder_Tensor {
14334 void set_output_strided(
14335 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14336 TensorOptions options, DimnameList names
14337 ) override {
14338 auto current_device = guard_.current_device();
14339 if (C10_UNLIKELY(current_device.has_value())) {
14340 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14341 "structured kernels don't support multi-device outputs");
14342 } else {
14343 guard_.reset_device(options.device());
14344 }
14345 outputs_[output_idx] = create_out(sizes, strides, options);
14346 if (!names.empty()) {
14347 namedinference::propagate_names(*outputs_[output_idx], names);
14348 }
14349 // super must happen after, so that downstream can use maybe_get_output
14350 // to retrieve the output
14351 at::meta::structured_remainder_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
14352 }
14353 void set_output_raw_strided(
14354 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14355 TensorOptions options, DimnameList names
14356 ) override {
14357 auto current_device = guard_.current_device();
14358 if (C10_UNLIKELY(current_device.has_value())) {
14359 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14360 "structured kernels don't support multi-device outputs");
14361 } else {
14362 guard_.reset_device(options.device());
14363 }
14364 outputs_[output_idx] = create_out(sizes, strides, options);
14365 if (!names.empty()) {
14366 namedinference::propagate_names(*outputs_[output_idx], names);
14367 }
14368 // super must happen after, so that downstream can use maybe_get_output
14369 // to retrieve the output
14370 at::meta::structured_remainder_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
14371 }
14372 const Tensor& maybe_get_output(int64_t output_idx) override {
14373 return *outputs_[output_idx];
14374 }
14375 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14376 c10::OptionalDeviceGuard guard_;
14377};
14378at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_remainder_Tensor(const at::Tensor & self, const at::Tensor & other) {
14379structured_remainder_Tensor_default_backend_functional op;
14380op.meta(self, other);
14381at::remainder_outf(self, other, *op.outputs_[0]);
14382return std::move(op.outputs_[0]).take();
14383}
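// In the in-place structs below, outputs_ holds a std::reference_wrapper to the
// caller's tensor rather than a freshly allocated one. set_output_strided first
// validates the existing tensor with check_inplace and, if it cannot be written
// directly with the requested sizes/strides, asks maybe_create_proxy for a
// temporary; the wrapper then copies the proxy back into `self` after the kernel
// runs (see the op.proxy_outputs_[0] check in the *_ wrappers).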
14384struct structured_remainder_Tensor_default_backend_inplace final : public at::meta::structured_remainder_Tensor {
14385 structured_remainder_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14386 void set_output_strided(
14387 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14388 TensorOptions options, DimnameList names
14389 ) override {
14390 auto current_device = guard_.current_device();
14391 if (C10_UNLIKELY(current_device.has_value())) {
14392 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14393 "structured kernels don't support multi-device outputs");
14394 } else {
14395 guard_.reset_device(options.device());
14396 }
14397 const auto& out = outputs_[output_idx].get();
14398 check_inplace(out, sizes, options);
14399 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14400 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14401 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14402 }
14403 if (!names.empty()) {
14404 namedinference::propagate_names(outputs_[output_idx], names);
14405 }
14406 // super must happen after, so that downstream can use maybe_get_output
14407 // to retrieve the output
14408 at::meta::structured_remainder_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
14409 }
14410 void set_output_raw_strided(
14411 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14412 TensorOptions options, DimnameList names
14413 ) override {
14414 auto current_device = guard_.current_device();
14415 if (C10_UNLIKELY(current_device.has_value())) {
14416 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14417 "structured kernels don't support multi-device outputs");
14418 } else {
14419 guard_.reset_device(options.device());
14420 }
14421 const auto& out = outputs_[output_idx].get();
14422 check_inplace(out, sizes, options);
14423 if (!names.empty()) {
14424 namedinference::propagate_names(outputs_[output_idx], names);
14425 }
14426 // super must happen after, so that downstream can use maybe_get_output
14427 // to retrieve the output
14428 at::meta::structured_remainder_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
14429 }
14430 const Tensor& maybe_get_output(int64_t output_idx) override {
14431 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14432 }
14433 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14434 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14435 c10::OptionalDeviceGuard guard_;
14436};
14437at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_remainder__Tensor(at::Tensor & self, const at::Tensor & other) {
14438structured_remainder_Tensor_default_backend_inplace op(self);
14439op.meta(self, other);
14440at::remainder_outf(self, other, op.outputs_[0]);
14441if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14442return self;
14443}
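// Every set_output_* override begins with the same device handling: the first
// output establishes the device via guard_.reset_device(options.device()), and
// any later output is asserted to be on that same device, since structured
// kernels do not support multi-device outputs.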
14444struct structured_fmin_default_backend_functional final : public at::meta::structured_fmin {
14445 void set_output_strided(
14446 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14447 TensorOptions options, DimnameList names
14448 ) override {
14449 auto current_device = guard_.current_device();
14450 if (C10_UNLIKELY(current_device.has_value())) {
14451 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14452 "structured kernels don't support multi-device outputs");
14453 } else {
14454 guard_.reset_device(options.device());
14455 }
14456 outputs_[output_idx] = create_out(sizes, strides, options);
14457 if (!names.empty()) {
14458 namedinference::propagate_names(*outputs_[output_idx], names);
14459 }
14460 // super must happen after, so that downstream can use maybe_get_output
14461 // to retrieve the output
14462 at::meta::structured_fmin::set_output_raw_strided(output_idx, sizes, strides, options, names);
14463 }
14464 void set_output_raw_strided(
14465 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14466 TensorOptions options, DimnameList names
14467 ) override {
14468 auto current_device = guard_.current_device();
14469 if (C10_UNLIKELY(current_device.has_value())) {
14470 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14471 "structured kernels don't support multi-device outputs");
14472 } else {
14473 guard_.reset_device(options.device());
14474 }
14475 outputs_[output_idx] = create_out(sizes, strides, options);
14476 if (!names.empty()) {
14477 namedinference::propagate_names(*outputs_[output_idx], names);
14478 }
14479 // super must happen after, so that downstream can use maybe_get_output
14480 // to retrieve the output
14481 at::meta::structured_fmin::set_output_raw_strided(output_idx, sizes, strides, options, names);
14482 }
14483 const Tensor& maybe_get_output(int64_t output_idx) override {
14484 return *outputs_[output_idx];
14485 }
14486 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14487 c10::OptionalDeviceGuard guard_;
14488};
14489at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_fmin(const at::Tensor & self, const at::Tensor & other) {
14490structured_fmin_default_backend_functional op;
14491op.meta(self, other);
14492at::fmin_outf(self, other, *op.outputs_[0]);
14493return std::move(op.outputs_[0]).take();
14494}
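// The trailing at::meta::structured_*::set_output_raw_strided(...) call forwards
// to the meta class so that code running after set_output (for example through
// maybe_get_output()) sees the output that was just created. Not every struct in
// this file ends with that call; whether it is emitted depends on the meta class
// backing the operator.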
14495struct structured_fmax_default_backend_functional final : public at::meta::structured_fmax {
14496 void set_output_strided(
14497 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14498 TensorOptions options, DimnameList names
14499 ) override {
14500 auto current_device = guard_.current_device();
14501 if (C10_UNLIKELY(current_device.has_value())) {
14502 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14503 "structured kernels don't support multi-device outputs");
14504 } else {
14505 guard_.reset_device(options.device());
14506 }
14507 outputs_[output_idx] = create_out(sizes, strides, options);
14508 if (!names.empty()) {
14509 namedinference::propagate_names(*outputs_[output_idx], names);
14510 }
14511 // super must happen after, so that downstream can use maybe_get_output
14512 // to retrieve the output
14513 at::meta::structured_fmax::set_output_raw_strided(output_idx, sizes, strides, options, names);
14514 }
14515 void set_output_raw_strided(
14516 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14517 TensorOptions options, DimnameList names
14518 ) override {
14519 auto current_device = guard_.current_device();
14520 if (C10_UNLIKELY(current_device.has_value())) {
14521 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14522 "structured kernels don't support multi-device outputs");
14523 } else {
14524 guard_.reset_device(options.device());
14525 }
14526 outputs_[output_idx] = create_out(sizes, strides, options);
14527 if (!names.empty()) {
14528 namedinference::propagate_names(*outputs_[output_idx], names);
14529 }
14530 // super must happen after, so that downstream can use maybe_get_output
14531 // to retrieve the output
14532 at::meta::structured_fmax::set_output_raw_strided(output_idx, sizes, strides, options, names);
14533 }
14534 const Tensor& maybe_get_output(int64_t output_idx) override {
14535 return *outputs_[output_idx];
14536 }
14537 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14538 c10::OptionalDeviceGuard guard_;
14539};
14540at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_fmax(const at::Tensor & self, const at::Tensor & other) {
14541structured_fmax_default_backend_functional op;
14542op.meta(self, other);
14543at::fmax_outf(self, other, *op.outputs_[0]);
14544return std::move(op.outputs_[0]).take();
14545}
14546struct structured_maximum_default_backend_functional final : public at::meta::structured_maximum {
14547 void set_output_strided(
14548 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14549 TensorOptions options, DimnameList names
14550 ) override {
14551 auto current_device = guard_.current_device();
14552 if (C10_UNLIKELY(current_device.has_value())) {
14553 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14554 "structured kernels don't support multi-device outputs");
14555 } else {
14556 guard_.reset_device(options.device());
14557 }
14558 outputs_[output_idx] = create_out(sizes, strides, options);
14559 if (!names.empty()) {
14560 namedinference::propagate_names(*outputs_[output_idx], names);
14561 }
14562 // super must happen after, so that downstream can use maybe_get_output
14563 // to retrieve the output
14564 at::meta::structured_maximum::set_output_raw_strided(output_idx, sizes, strides, options, names);
14565 }
14566 void set_output_raw_strided(
14567 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14568 TensorOptions options, DimnameList names
14569 ) override {
14570 auto current_device = guard_.current_device();
14571 if (C10_UNLIKELY(current_device.has_value())) {
14572 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14573 "structured kernels don't support multi-device outputs");
14574 } else {
14575 guard_.reset_device(options.device());
14576 }
14577 outputs_[output_idx] = create_out(sizes, strides, options);
14578 if (!names.empty()) {
14579 namedinference::propagate_names(*outputs_[output_idx], names);
14580 }
14581 // super must happen after, so that downstream can use maybe_get_output
14582 // to retrieve the output
14583 at::meta::structured_maximum::set_output_raw_strided(output_idx, sizes, strides, options, names);
14584 }
14585 const Tensor& maybe_get_output(int64_t output_idx) override {
14586 return *outputs_[output_idx];
14587 }
14588 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14589 c10::OptionalDeviceGuard guard_;
14590};
14591at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_maximum(const at::Tensor & self, const at::Tensor & other) {
14592structured_maximum_default_backend_functional op;
14593op.meta(self, other);
14594at::maximum_outf(self, other, *op.outputs_[0]);
14595return std::move(op.outputs_[0]).take();
14596}
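// Named-tensor support is uniform across these structs: when the meta function
// supplies a non-empty DimnameList, namedinference::propagate_names copies the
// dimension names onto the output; otherwise the names block is skipped entirely.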
14597struct structured_minimum_default_backend_functional final : public at::meta::structured_minimum {
14598 void set_output_strided(
14599 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14600 TensorOptions options, DimnameList names
14601 ) override {
14602 auto current_device = guard_.current_device();
14603 if (C10_UNLIKELY(current_device.has_value())) {
14604 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14605 "structured kernels don't support multi-device outputs");
14606 } else {
14607 guard_.reset_device(options.device());
14608 }
14609 outputs_[output_idx] = create_out(sizes, strides, options);
14610 if (!names.empty()) {
14611 namedinference::propagate_names(*outputs_[output_idx], names);
14612 }
14613 // super must happen after, so that downstream can use maybe_get_output
14614 // to retrieve the output
14615 at::meta::structured_minimum::set_output_raw_strided(output_idx, sizes, strides, options, names);
14616 }
14617 void set_output_raw_strided(
14618 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14619 TensorOptions options, DimnameList names
14620 ) override {
14621 auto current_device = guard_.current_device();
14622 if (C10_UNLIKELY(current_device.has_value())) {
14623 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14624 "structured kernels don't support multi-device outputs");
14625 } else {
14626 guard_.reset_device(options.device());
14627 }
14628 outputs_[output_idx] = create_out(sizes, strides, options);
14629 if (!names.empty()) {
14630 namedinference::propagate_names(*outputs_[output_idx], names);
14631 }
14632 // super must happen after, so that downstream can use maybe_get_output
14633 // to retrieve the output
14634 at::meta::structured_minimum::set_output_raw_strided(output_idx, sizes, strides, options, names);
14635 }
14636 const Tensor& maybe_get_output(int64_t output_idx) override {
14637 return *outputs_[output_idx];
14638 }
14639 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14640 c10::OptionalDeviceGuard guard_;
14641};
14642at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_minimum(const at::Tensor & self, const at::Tensor & other) {
14643structured_minimum_default_backend_functional op;
14644op.meta(self, other);
14645at::minimum_outf(self, other, *op.outputs_[0]);
14646return std::move(op.outputs_[0]).take();
14647}
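// sort.stable and topk below are two-output structured kernels: outputs_ has two
// ExclusivelyOwned slots (values and indices), the meta function sizes both, and
// the wrapper returns them as a std::tuple after the *_outf call fills them in.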
14648struct structured_sort_stable_default_backend_functional final : public at::meta::structured_sort_stable {
14649 void set_output_strided(
14650 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14651 TensorOptions options, DimnameList names
14652 ) override {
14653 auto current_device = guard_.current_device();
14654 if (C10_UNLIKELY(current_device.has_value())) {
14655 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14656 "structured kernels don't support multi-device outputs");
14657 } else {
14658 guard_.reset_device(options.device());
14659 }
14660 outputs_[output_idx] = create_out(sizes, strides, options);
14661 if (!names.empty()) {
14662 namedinference::propagate_names(*outputs_[output_idx], names);
14663 }
14664 // super must happen after, so that downstream can use maybe_get_output
14665 // to retrieve the output
14666 }
14667 void set_output_raw_strided(
14668 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14669 TensorOptions options, DimnameList names
14670 ) override {
14671 auto current_device = guard_.current_device();
14672 if (C10_UNLIKELY(current_device.has_value())) {
14673 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14674 "structured kernels don't support multi-device outputs");
14675 } else {
14676 guard_.reset_device(options.device());
14677 }
14678 outputs_[output_idx] = create_out(sizes, strides, options);
14679 if (!names.empty()) {
14680 namedinference::propagate_names(*outputs_[output_idx], names);
14681 }
14682 // super must happen after, so that downstream can use maybe_get_output
14683 // to retrieve the output
14684 }
14685 const Tensor& maybe_get_output(int64_t output_idx) override {
14686 return *outputs_[output_idx];
14687 }
14688 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
14689 c10::OptionalDeviceGuard guard_;
14690};
14691::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_sort_stable(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
14692structured_sort_stable_default_backend_functional op;
14693op.meta(self, stable, dim, descending);
14694at::sort_outf(self, stable, dim, descending, *op.outputs_[0], *op.outputs_[1]);
14695return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
14696}
14697struct structured_topk_default_backend_functional final : public at::meta::structured_topk {
14698 void set_output_strided(
14699 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14700 TensorOptions options, DimnameList names
14701 ) override {
14702 auto current_device = guard_.current_device();
14703 if (C10_UNLIKELY(current_device.has_value())) {
14704 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14705 "structured kernels don't support multi-device outputs");
14706 } else {
14707 guard_.reset_device(options.device());
14708 }
14709 outputs_[output_idx] = create_out(sizes, strides, options);
14710 if (!names.empty()) {
14711 namedinference::propagate_names(*outputs_[output_idx], names);
14712 }
14713 // super must happen after, so that downstream can use maybe_get_output
14714 // to retrieve the output
14715 }
14716 void set_output_raw_strided(
14717 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14718 TensorOptions options, DimnameList names
14719 ) override {
14720 auto current_device = guard_.current_device();
14721 if (C10_UNLIKELY(current_device.has_value())) {
14722 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14723 "structured kernels don't support multi-device outputs");
14724 } else {
14725 guard_.reset_device(options.device());
14726 }
14727 outputs_[output_idx] = create_out(sizes, strides, options);
14728 if (!names.empty()) {
14729 namedinference::propagate_names(*outputs_[output_idx], names);
14730 }
14731 // super must happen after, so that downstream can use maybe_get_output
14732 // to retrieve the output
14733 }
14734 const Tensor& maybe_get_output(int64_t output_idx) override {
14735 return *outputs_[output_idx];
14736 }
14737 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
14738 c10::OptionalDeviceGuard guard_;
14739};
14740::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
14741structured_topk_default_backend_functional op;
14742op.meta(self, k, dim, largest, sorted);
14743at::topk_outf(self, k, dim, largest, sorted, *op.outputs_[0], *op.outputs_[1]);
14744return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
14745}
14746struct structured_all_default_backend_functional final : public at::meta::structured_all {
14747 void set_output_strided(
14748 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14749 TensorOptions options, DimnameList names
14750 ) override {
14751 auto current_device = guard_.current_device();
14752 if (C10_UNLIKELY(current_device.has_value())) {
14753 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14754 "structured kernels don't support multi-device outputs");
14755 } else {
14756 guard_.reset_device(options.device());
14757 }
14758 outputs_[output_idx] = create_out(sizes, strides, options);
14759 if (!names.empty()) {
14760 namedinference::propagate_names(*outputs_[output_idx], names);
14761 }
14762 // super must happen after, so that downstream can use maybe_get_output
14763 // to retrieve the output
14764 }
14765 void set_output_raw_strided(
14766 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14767 TensorOptions options, DimnameList names
14768 ) override {
14769 auto current_device = guard_.current_device();
14770 if (C10_UNLIKELY(current_device.has_value())) {
14771 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14772 "structured kernels don't support multi-device outputs");
14773 } else {
14774 guard_.reset_device(options.device());
14775 }
14776 outputs_[output_idx] = create_out(sizes, strides, options);
14777 if (!names.empty()) {
14778 namedinference::propagate_names(*outputs_[output_idx], names);
14779 }
14780 // super must happen after, so that downstream can use maybe_get_output
14781 // to retrieve the output
14782 }
14783 const Tensor& maybe_get_output(int64_t output_idx) override {
14784 return *outputs_[output_idx];
14785 }
14786 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14787 c10::OptionalDeviceGuard guard_;
14788};
14789at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_all(const at::Tensor & self) {
14790structured_all_default_backend_functional op;
14791op.meta(self);
14792at::all_outf(self, *op.outputs_[0]);
14793return std::move(op.outputs_[0]).take();
14794}
14795struct structured_any_default_backend_functional final : public at::meta::structured_any {
14796 void set_output_strided(
14797 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14798 TensorOptions options, DimnameList names
14799 ) override {
14800 auto current_device = guard_.current_device();
14801 if (C10_UNLIKELY(current_device.has_value())) {
14802 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14803 "structured kernels don't support multi-device outputs");
14804 } else {
14805 guard_.reset_device(options.device());
14806 }
14807 outputs_[output_idx] = create_out(sizes, strides, options);
14808 if (!names.empty()) {
14809 namedinference::propagate_names(*outputs_[output_idx], names);
14810 }
14811 // super must happen after, so that downstream can use maybe_get_output
14812 // to retrieve the output
14813 }
14814 void set_output_raw_strided(
14815 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14816 TensorOptions options, DimnameList names
14817 ) override {
14818 auto current_device = guard_.current_device();
14819 if (C10_UNLIKELY(current_device.has_value())) {
14820 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14821 "structured kernels don't support multi-device outputs");
14822 } else {
14823 guard_.reset_device(options.device());
14824 }
14825 outputs_[output_idx] = create_out(sizes, strides, options);
14826 if (!names.empty()) {
14827 namedinference::propagate_names(*outputs_[output_idx], names);
14828 }
14829 // super must happen after, so that downstream can use maybe_get_output
14830 // to retrieve the output
14831 }
14832 const Tensor& maybe_get_output(int64_t output_idx) override {
14833 return *outputs_[output_idx];
14834 }
14835 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14836 c10::OptionalDeviceGuard guard_;
14837};
14838at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_any(const at::Tensor & self) {
14839structured_any_default_backend_functional op;
14840op.meta(self);
14841at::any_outf(self, *op.outputs_[0]);
14842return std::move(op.outputs_[0]).take();
14843}
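// Returning std::move(op.outputs_[0]).take() releases the ExclusivelyOwned
// wrapper and hands the underlying Tensor to the caller without an extra
// reference-count round trip, which is what keeps the functional variants cheap
// to return by value.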
14844struct structured_renorm_default_backend_functional final : public at::meta::structured_renorm {
14845 void set_output_strided(
14846 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14847 TensorOptions options, DimnameList names
14848 ) override {
14849 auto current_device = guard_.current_device();
14850 if (C10_UNLIKELY(current_device.has_value())) {
14851 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14852 "structured kernels don't support multi-device outputs");
14853 } else {
14854 guard_.reset_device(options.device());
14855 }
14856 outputs_[output_idx] = create_out(sizes, strides, options);
14857 if (!names.empty()) {
14858 namedinference::propagate_names(*outputs_[output_idx], names);
14859 }
14860 // super must happen after, so that downstream can use maybe_get_output
14861 // to retrieve the output
14862 }
14863 void set_output_raw_strided(
14864 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14865 TensorOptions options, DimnameList names
14866 ) override {
14867 auto current_device = guard_.current_device();
14868 if (C10_UNLIKELY(current_device.has_value())) {
14869 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14870 "structured kernels don't support multi-device outputs");
14871 } else {
14872 guard_.reset_device(options.device());
14873 }
14874 outputs_[output_idx] = create_out(sizes, strides, options);
14875 if (!names.empty()) {
14876 namedinference::propagate_names(*outputs_[output_idx], names);
14877 }
14878 // super must happen after, so that downstream can use maybe_get_output
14879 // to retrieve the output
14880 }
14881 const Tensor& maybe_get_output(int64_t output_idx) override {
14882 return *outputs_[output_idx];
14883 }
14884 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14885 c10::OptionalDeviceGuard guard_;
14886};
14887at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
14888structured_renorm_default_backend_functional op;
14889op.meta(self, p, dim, maxnorm);
14890at::renorm_outf(self, p, dim, maxnorm, *op.outputs_[0]);
14891return std::move(op.outputs_[0]).take();
14892}
14893struct structured_renorm_default_backend_inplace final : public at::meta::structured_renorm {
14894 structured_renorm_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14895 void set_output_strided(
14896 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14897 TensorOptions options, DimnameList names
14898 ) override {
14899 auto current_device = guard_.current_device();
14900 if (C10_UNLIKELY(current_device.has_value())) {
14901 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14902 "structured kernels don't support multi-device outputs");
14903 } else {
14904 guard_.reset_device(options.device());
14905 }
14906 const auto& out = outputs_[output_idx].get();
14907 check_inplace(out, sizes, options);
14908 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14909 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14910 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14911 }
14912 if (!names.empty()) {
14913 namedinference::propagate_names(outputs_[output_idx], names);
14914 }
14915 // super must happen after, so that downstream can use maybe_get_output
14916 // to retrieve the output
14917 }
14918 void set_output_raw_strided(
14919 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14920 TensorOptions options, DimnameList names
14921 ) override {
14922 auto current_device = guard_.current_device();
14923 if (C10_UNLIKELY(current_device.has_value())) {
14924 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14925 "structured kernels don't support multi-device outputs");
14926 } else {
14927 guard_.reset_device(options.device());
14928 }
14929 const auto& out = outputs_[output_idx].get();
14930 check_inplace(out, sizes, options);
14931 if (!names.empty()) {
14932 namedinference::propagate_names(outputs_[output_idx], names);
14933 }
14934 // super must happen after, so that downstream can use maybe_get_output
14935 // to retrieve the output
14936 }
14937 const Tensor& maybe_get_output(int64_t output_idx) override {
14938 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14939 }
14940 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14941 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14942 c10::OptionalDeviceGuard guard_;
14943};
14944at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
14945structured_renorm_default_backend_inplace op(self);
14946op.meta(self, p, dim, maxnorm);
14947at::renorm_outf(self, p, dim, maxnorm, op.outputs_[0]);
14948if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14949return self;
14950}
14951struct structured_pow_Tensor_Tensor_default_backend_functional final : public at::meta::structured_pow_Tensor_Tensor {
14952 void set_output_strided(
14953 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14954 TensorOptions options, DimnameList names
14955 ) override {
14956 auto current_device = guard_.current_device();
14957 if (C10_UNLIKELY(current_device.has_value())) {
14958 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14959 "structured kernels don't support multi-device outputs");
14960 } else {
14961 guard_.reset_device(options.device());
14962 }
14963 outputs_[output_idx] = create_out(sizes, strides, options);
14964 if (!names.empty()) {
14965 namedinference::propagate_names(*outputs_[output_idx], names);
14966 }
14967 // super must happen after, so that downstream can use maybe_get_output
14968 // to retrieve the output
14969 at::meta::structured_pow_Tensor_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
14970 }
14971 void set_output_raw_strided(
14972 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14973 TensorOptions options, DimnameList names
14974 ) override {
14975 auto current_device = guard_.current_device();
14976 if (C10_UNLIKELY(current_device.has_value())) {
14977 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
14978 "structured kernels don't support multi-device outputs");
14979 } else {
14980 guard_.reset_device(options.device());
14981 }
14982 outputs_[output_idx] = create_out(sizes, strides, options);
14983 if (!names.empty()) {
14984 namedinference::propagate_names(*outputs_[output_idx], names);
14985 }
14986 // super must happen after, so that downstream can use maybe_get_output
14987 // to retrieve the output
14988 at::meta::structured_pow_Tensor_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
14989 }
14990 const Tensor& maybe_get_output(int64_t output_idx) override {
14991 return *outputs_[output_idx];
14992 }
14993 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14994 c10::OptionalDeviceGuard guard_;
14995};
14996at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_pow_Tensor_Tensor(const at::Tensor & self, const at::Tensor & exponent) {
14997structured_pow_Tensor_Tensor_default_backend_functional op;
14998op.meta(self, exponent);
14999at::pow_outf(self, exponent, *op.outputs_[0]);
15000return std::move(op.outputs_[0]).take();
15001}
15002struct structured_pow_Tensor_Tensor_default_backend_inplace final : public at::meta::structured_pow_Tensor_Tensor {
15003 structured_pow_Tensor_Tensor_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15004 void set_output_strided(
15005 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15006 TensorOptions options, DimnameList names
15007 ) override {
15008 auto current_device = guard_.current_device();
15009 if (C10_UNLIKELY(current_device.has_value())) {
15010 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15011 "structured kernels don't support multi-device outputs");
15012 } else {
15013 guard_.reset_device(options.device());
15014 }
15015 const auto& out = outputs_[output_idx].get();
15016 check_inplace(out, sizes, options);
15017 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15018 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15019 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15020 }
15021 if (!names.empty()) {
15022 namedinference::propagate_names(outputs_[output_idx], names);
15023 }
15024 // super must happen after, so that downstream can use maybe_get_output
15025 // to retrieve the output
15026 at::meta::structured_pow_Tensor_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
15027 }
15028 void set_output_raw_strided(
15029 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15030 TensorOptions options, DimnameList names
15031 ) override {
15032 auto current_device = guard_.current_device();
15033 if (C10_UNLIKELY(current_device.has_value())) {
15034 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15035 "structured kernels don't support multi-device outputs");
15036 } else {
15037 guard_.reset_device(options.device());
15038 }
15039 const auto& out = outputs_[output_idx].get();
15040 check_inplace(out, sizes, options);
15041 if (!names.empty()) {
15042 namedinference::propagate_names(outputs_[output_idx], names);
15043 }
15044 // super must happen after, so that downstream can use maybe_get_output
15045 // to retrieve the output
15046 at::meta::structured_pow_Tensor_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
15047 }
15048 const Tensor& maybe_get_output(int64_t output_idx) override {
15049 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15050 }
15051 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15052 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15053 c10::OptionalDeviceGuard guard_;
15054};
15055at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_pow__Tensor(at::Tensor & self, const at::Tensor & exponent) {
15056structured_pow_Tensor_Tensor_default_backend_inplace op(self);
15057op.meta(self, exponent);
15058at::pow_outf(self, exponent, op.outputs_[0]);
15059if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15060return self;
15061}
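// pow.Scalar is the overload whose base is a Scalar and whose exponent is a
// Tensor; it only has a functional variant (there is no tensor `self` to write
// into), but otherwise follows the same allocate-then-call-pow_outf pattern as
// the Tensor_Tensor and Tensor_Scalar overloads around it.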
15062struct structured_pow_Scalar_default_backend_functional final : public at::meta::structured_pow_Scalar {
15063 void set_output_strided(
15064 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15065 TensorOptions options, DimnameList names
15066 ) override {
15067 auto current_device = guard_.current_device();
15068 if (C10_UNLIKELY(current_device.has_value())) {
15069 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15070 "structured kernels don't support multi-device outputs");
15071 } else {
15072 guard_.reset_device(options.device());
15073 }
15074 outputs_[output_idx] = create_out(sizes, strides, options);
15075 if (!names.empty()) {
15076 namedinference::propagate_names(*outputs_[output_idx], names);
15077 }
15078 // super must happen after, so that downstream can use maybe_get_output
15079 // to retrieve the output
15080 }
15081 void set_output_raw_strided(
15082 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15083 TensorOptions options, DimnameList names
15084 ) override {
15085 auto current_device = guard_.current_device();
15086 if (C10_UNLIKELY(current_device.has_value())) {
15087 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15088 "structured kernels don't support multi-device outputs");
15089 } else {
15090 guard_.reset_device(options.device());
15091 }
15092 outputs_[output_idx] = create_out(sizes, strides, options);
15093 if (!names.empty()) {
15094 namedinference::propagate_names(*outputs_[output_idx], names);
15095 }
15096 // super must happen after, so that downstream can use maybe_get_output
15097 // to retrieve the output
15098 }
15099 const Tensor& maybe_get_output(int64_t output_idx) override {
15100 return *outputs_[output_idx];
15101 }
15102 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15103 c10::OptionalDeviceGuard guard_;
15104};
15105at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_pow_Scalar(const at::Scalar & self, const at::Tensor & exponent) {
15106structured_pow_Scalar_default_backend_functional op;
15107op.meta(self, exponent);
15108at::pow_outf(self, exponent, *op.outputs_[0]);
15109return std::move(op.outputs_[0]).take();
15110}
15111struct structured_pow_Tensor_Scalar_default_backend_functional final : public at::meta::structured_pow_Tensor_Scalar {
15112 void set_output_strided(
15113 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15114 TensorOptions options, DimnameList names
15115 ) override {
15116 auto current_device = guard_.current_device();
15117 if (C10_UNLIKELY(current_device.has_value())) {
15118 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15119 "structured kernels don't support multi-device outputs");
15120 } else {
15121 guard_.reset_device(options.device());
15122 }
15123 outputs_[output_idx] = create_out(sizes, strides, options);
15124 if (!names.empty()) {
15125 namedinference::propagate_names(*outputs_[output_idx], names);
15126 }
15127 // super must happen after, so that downstream can use maybe_get_output
15128 // to retrieve the output
15129 at::meta::structured_pow_Tensor_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15130 }
15131 void set_output_raw_strided(
15132 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15133 TensorOptions options, DimnameList names
15134 ) override {
15135 auto current_device = guard_.current_device();
15136 if (C10_UNLIKELY(current_device.has_value())) {
15137 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15138 "structured kernels don't support multi-device outputs");
15139 } else {
15140 guard_.reset_device(options.device());
15141 }
15142 outputs_[output_idx] = create_out(sizes, strides, options);
15143 if (!names.empty()) {
15144 namedinference::propagate_names(*outputs_[output_idx], names);
15145 }
15146 // super must happen after, so that downstream can use maybe_get_output
15147 // to retrieve the output
15148 at::meta::structured_pow_Tensor_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15149 }
15150 const Tensor& maybe_get_output(int64_t output_idx) override {
15151 return *outputs_[output_idx];
15152 }
15153 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15154 c10::OptionalDeviceGuard guard_;
15155};
15156at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_pow_Tensor_Scalar(const at::Tensor & self, const at::Scalar & exponent) {
15157structured_pow_Tensor_Scalar_default_backend_functional op;
15158op.meta(self, exponent);
15159at::pow_outf(self, exponent, *op.outputs_[0]);
15160return std::move(op.outputs_[0]).take();
15161}
15162struct structured_pow_Tensor_Scalar_default_backend_inplace final : public at::meta::structured_pow_Tensor_Scalar {
15163 structured_pow_Tensor_Scalar_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15164 void set_output_strided(
15165 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15166 TensorOptions options, DimnameList names
15167 ) override {
15168 auto current_device = guard_.current_device();
15169 if (C10_UNLIKELY(current_device.has_value())) {
15170 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15171 "structured kernels don't support multi-device outputs");
15172 } else {
15173 guard_.reset_device(options.device());
15174 }
15175 const auto& out = outputs_[output_idx].get();
15176 check_inplace(out, sizes, options);
15177 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15178 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15179 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15180 }
15181 if (!names.empty()) {
15182 namedinference::propagate_names(outputs_[output_idx], names);
15183 }
15184 // super must happen after, so that downstream can use maybe_get_output
15185 // to retrieve the output
15186 at::meta::structured_pow_Tensor_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15187 }
15188 void set_output_raw_strided(
15189 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15190 TensorOptions options, DimnameList names
15191 ) override {
15192 auto current_device = guard_.current_device();
15193 if (C10_UNLIKELY(current_device.has_value())) {
15194 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15195 "structured kernels don't support multi-device outputs");
15196 } else {
15197 guard_.reset_device(options.device());
15198 }
15199 const auto& out = outputs_[output_idx].get();
15200 check_inplace(out, sizes, options);
15201 if (!names.empty()) {
15202 namedinference::propagate_names(outputs_[output_idx], names);
15203 }
15204 // super must happen after, so that downstream can use maybe_get_output
15205 // to retrieve the output
15206 at::meta::structured_pow_Tensor_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15207 }
15208 const Tensor& maybe_get_output(int64_t output_idx) override {
15209 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15210 }
15211 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15212 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15213 c10::OptionalDeviceGuard guard_;
15214};
15215at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_pow__Scalar(at::Tensor & self, const at::Scalar & exponent) {
15216structured_pow_Tensor_Scalar_default_backend_inplace op(self);
15217op.meta(self, exponent);
15218at::pow_outf(self, exponent, op.outputs_[0]);
15219if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15220return self;
15221}
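// The two _convert_indices_from_* kernels below are the sparse-index conversion
// helpers between COO row indices and CSR crow_indices; their wrappers forward
// their arguments unchanged to the corresponding _outf call.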
15222struct structured__convert_indices_from_coo_to_csr_default_backend_functional final : public at::meta::structured__convert_indices_from_coo_to_csr {
15223 void set_output_strided(
15224 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15225 TensorOptions options, DimnameList names
15226 ) override {
15227 auto current_device = guard_.current_device();
15228 if (C10_UNLIKELY(current_device.has_value())) {
15229 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15230 "structured kernels don't support multi-device outputs");
15231 } else {
15232 guard_.reset_device(options.device());
15233 }
15234 outputs_[output_idx] = create_out(sizes, strides, options);
15235 if (!names.empty()) {
15236 namedinference::propagate_names(*outputs_[output_idx], names);
15237 }
15238 // super must happen after, so that downstream can use maybe_get_output
15239 // to retrieve the output
15240 }
15241 void set_output_raw_strided(
15242 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15243 TensorOptions options, DimnameList names
15244 ) override {
15245 auto current_device = guard_.current_device();
15246 if (C10_UNLIKELY(current_device.has_value())) {
15247 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15248 "structured kernels don't support multi-device outputs");
15249 } else {
15250 guard_.reset_device(options.device());
15251 }
15252 outputs_[output_idx] = create_out(sizes, strides, options);
15253 if (!names.empty()) {
15254 namedinference::propagate_names(*outputs_[output_idx], names);
15255 }
15256 // super must happen after, so that downstream can use maybe_get_output
15257 // to retrieve the output
15258 }
15259 const Tensor& maybe_get_output(int64_t output_idx) override {
15260 return *outputs_[output_idx];
15261 }
15262 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15263 c10::OptionalDeviceGuard guard_;
15264};
15265at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32) {
15266structured__convert_indices_from_coo_to_csr_default_backend_functional op;
15267op.meta(self, size, out_int32);
15268at::_convert_indices_from_coo_to_csr_outf(self, size, out_int32, *op.outputs_[0]);
15269return std::move(op.outputs_[0]).take();
15270}
15271struct structured__convert_indices_from_csr_to_coo_default_backend_functional final : public at::meta::structured__convert_indices_from_csr_to_coo {
15272 void set_output_strided(
15273 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15274 TensorOptions options, DimnameList names
15275 ) override {
15276 auto current_device = guard_.current_device();
15277 if (C10_UNLIKELY(current_device.has_value())) {
15278 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15279 "structured kernels don't support multi-device outputs");
15280 } else {
15281 guard_.reset_device(options.device());
15282 }
15283 outputs_[output_idx] = create_out(sizes, strides, options);
15284 if (!names.empty()) {
15285 namedinference::propagate_names(*outputs_[output_idx], names);
15286 }
15287 // super must happen after, so that downstream can use maybe_get_output
15288 // to retrieve the output
15289 }
15290 void set_output_raw_strided(
15291 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15292 TensorOptions options, DimnameList names
15293 ) override {
15294 auto current_device = guard_.current_device();
15295 if (C10_UNLIKELY(current_device.has_value())) {
15296 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15297 "structured kernels don't support multi-device outputs");
15298 } else {
15299 guard_.reset_device(options.device());
15300 }
15301 outputs_[output_idx] = create_out(sizes, strides, options);
15302 if (!names.empty()) {
15303 namedinference::propagate_names(*outputs_[output_idx], names);
15304 }
15305 // super must happen after, so that downstream can use maybe_get_output
15306 // to retrieve the output
15307 }
15308 const Tensor& maybe_get_output(int64_t output_idx) override {
15309 return *outputs_[output_idx];
15310 }
15311 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15312 c10::OptionalDeviceGuard guard_;
15313};
15314at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
15315structured__convert_indices_from_csr_to_coo_default_backend_functional op;
15316op.meta(crow_indices, col_indices, out_int32, transpose);
15317at::_convert_indices_from_csr_to_coo_outf(crow_indices, col_indices, out_int32, transpose, *op.outputs_[0]);
15318return std::move(op.outputs_[0]).take();
15319}
15320struct structured_mse_loss_default_backend_functional final : public at::meta::structured_mse_loss {
15321 void set_output_strided(
15322 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15323 TensorOptions options, DimnameList names
15324 ) override {
15325 auto current_device = guard_.current_device();
15326 if (C10_UNLIKELY(current_device.has_value())) {
15327 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15328 "structured kernels don't support multi-device outputs");
15329 } else {
15330 guard_.reset_device(options.device());
15331 }
15332 outputs_[output_idx] = create_out(sizes, strides, options);
15333 if (!names.empty()) {
15334 namedinference::propagate_names(*outputs_[output_idx], names);
15335 }
15336 // super must happen after, so that downstream can use maybe_get_output
15337 // to retrieve the output
15338 at::meta::structured_mse_loss::set_output_raw_strided(output_idx, sizes, strides, options, names);
15339 }
15340 void set_output_raw_strided(
15341 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15342 TensorOptions options, DimnameList names
15343 ) override {
15344 auto current_device = guard_.current_device();
15345 if (C10_UNLIKELY(current_device.has_value())) {
15346 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15347 "structured kernels don't support multi-device outputs");
15348 } else {
15349 guard_.reset_device(options.device());
15350 }
15351 outputs_[output_idx] = create_out(sizes, strides, options);
15352 if (!names.empty()) {
15353 namedinference::propagate_names(*outputs_[output_idx], names);
15354 }
15355 // super must happen after, so that downstream can use maybe_get_output
15356 // to retrieve the output
15357 at::meta::structured_mse_loss::set_output_raw_strided(output_idx, sizes, strides, options, names);
15358 }
15359 const Tensor& maybe_get_output(int64_t output_idx) override {
15360 return *outputs_[output_idx];
15361 }
15362 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15363 c10::OptionalDeviceGuard guard_;
15364};
15365at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
15366structured_mse_loss_default_backend_functional op;
15367op.meta(self, target, reduction);
15368at::mse_loss_outf(self, target, reduction, *op.outputs_[0]);
15369return std::move(op.outputs_[0]).take();
15370}
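// nll_loss_forward and nll_loss_backward take an optional weight tensor. The
// wrappers below convert the c10::optional<at::Tensor> into an
// at::OptionalTensorRef for the meta() call (only when a defined tensor is
// actually present), while the *_outf call still receives the optional itself.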
15371struct structured_nll_loss_forward_default_backend_functional final : public at::meta::structured_nll_loss_forward {
15372 void set_output_strided(
15373 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15374 TensorOptions options, DimnameList names
15375 ) override {
15376 auto current_device = guard_.current_device();
15377 if (C10_UNLIKELY(current_device.has_value())) {
15378 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15379 "structured kernels don't support multi-device outputs");
15380 } else {
15381 guard_.reset_device(options.device());
15382 }
15383 outputs_[output_idx] = create_out(sizes, strides, options);
15384 if (!names.empty()) {
15385 namedinference::propagate_names(*outputs_[output_idx], names);
15386 }
15387 // super must happen after, so that downstream can use maybe_get_output
15388 // to retrieve the output
15389 }
15390 void set_output_raw_strided(
15391 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15392 TensorOptions options, DimnameList names
15393 ) override {
15394 auto current_device = guard_.current_device();
15395 if (C10_UNLIKELY(current_device.has_value())) {
15396 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15397 "structured kernels don't support multi-device outputs");
15398 } else {
15399 guard_.reset_device(options.device());
15400 }
15401 outputs_[output_idx] = create_out(sizes, strides, options);
15402 if (!names.empty()) {
15403 namedinference::propagate_names(*outputs_[output_idx], names);
15404 }
15405 // super must happen after, so that downstream can use maybe_get_output
15406 // to retrieve the output
15407 }
15408 const Tensor& maybe_get_output(int64_t output_idx) override {
15409 return *outputs_[output_idx];
15410 }
15411 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
15412 c10::OptionalDeviceGuard guard_;
15413};
15414::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
15415structured_nll_loss_forward_default_backend_functional op;
15416op.meta(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index);
15417at::nll_loss_forward_outf(self, target, weight, reduction, ignore_index, *op.outputs_[0], *op.outputs_[1]);
15418return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
15419}
15420struct structured_nll_loss_backward_default_backend_functional final : public at::meta::structured_nll_loss_backward {
15421 void set_output_strided(
15422 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15423 TensorOptions options, DimnameList names
15424 ) override {
15425 auto current_device = guard_.current_device();
15426 if (C10_UNLIKELY(current_device.has_value())) {
15427 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15428 "structured kernels don't support multi-device outputs");
15429 } else {
15430 guard_.reset_device(options.device());
15431 }
15432 outputs_[output_idx] = create_out(sizes, strides, options);
15433 if (!names.empty()) {
15434 namedinference::propagate_names(*outputs_[output_idx], names);
15435 }
15436 // super must happen after, so that downstream can use maybe_get_output
15437 // to retrieve the output
15438 }
15439 void set_output_raw_strided(
15440 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15441 TensorOptions options, DimnameList names
15442 ) override {
15443 auto current_device = guard_.current_device();
15444 if (C10_UNLIKELY(current_device.has_value())) {
15445 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15446 "structured kernels don't support multi-device outputs");
15447 } else {
15448 guard_.reset_device(options.device());
15449 }
15450 outputs_[output_idx] = create_out(sizes, strides, options);
15451 if (!names.empty()) {
15452 namedinference::propagate_names(*outputs_[output_idx], names);
15453 }
15454 // super must happen after, so that downstream can use maybe_get_output
15455 // to retrieve the output
15456 }
15457 const Tensor& maybe_get_output(int64_t output_idx) override {
15458 return *outputs_[output_idx];
15459 }
15460 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15461 c10::OptionalDeviceGuard guard_;
15462};
15463at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
15464structured_nll_loss_backward_default_backend_functional op;
15465op.meta(grad_output, self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, total_weight);
15466at::nll_loss_backward_outf(grad_output, self, target, weight, reduction, ignore_index, total_weight, *op.outputs_[0]);
15467return std::move(op.outputs_[0]).take();
15468}
15469struct structured_smooth_l1_loss_default_backend_functional final : public at::meta::structured_smooth_l1_loss {
15470 void set_output_strided(
15471 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15472 TensorOptions options, DimnameList names
15473 ) override {
15474 auto current_device = guard_.current_device();
15475 if (C10_UNLIKELY(current_device.has_value())) {
15476 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15477 "structured kernels don't support multi-device outputs");
15478 } else {
15479 guard_.reset_device(options.device());
15480 }
15481 outputs_[output_idx] = create_out(sizes, strides, options);
15482 if (!names.empty()) {
15483 namedinference::propagate_names(*outputs_[output_idx], names);
15484 }
15485 // super must happen after, so that downstream can use maybe_get_output
15486 // to retrieve the output
15487 at::meta::structured_smooth_l1_loss::set_output_raw_strided(output_idx, sizes, strides, options, names);
15488 }
15489 void set_output_raw_strided(
15490 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15491 TensorOptions options, DimnameList names
15492 ) override {
15493 auto current_device = guard_.current_device();
15494 if (C10_UNLIKELY(current_device.has_value())) {
15495 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15496 "structured kernels don't support multi-device outputs");
15497 } else {
15498 guard_.reset_device(options.device());
15499 }
15500 outputs_[output_idx] = create_out(sizes, strides, options);
15501 if (!names.empty()) {
15502 namedinference::propagate_names(*outputs_[output_idx], names);
15503 }
15504 // super must happen after, so that downstream can use maybe_get_output
15505 // to retrieve the output
15506 at::meta::structured_smooth_l1_loss::set_output_raw_strided(output_idx, sizes, strides, options, names);
15507 }
15508 const Tensor& maybe_get_output(int64_t output_idx) override {
15509 return *outputs_[output_idx];
15510 }
15511 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15512 c10::OptionalDeviceGuard guard_;
15513};
15514at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
15515structured_smooth_l1_loss_default_backend_functional op;
15516op.meta(self, target, reduction, beta);
15517at::smooth_l1_loss_outf(self, target, reduction, beta, *op.outputs_[0]);
15518return std::move(op.outputs_[0]).take();
15519}
15520struct structured_elu_default_backend_functional final : public at::meta::structured_elu {
15521 void set_output_strided(
15522 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15523 TensorOptions options, DimnameList names
15524 ) override {
15525 auto current_device = guard_.current_device();
15526 if (C10_UNLIKELY(current_device.has_value())) {
15527 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15528 "structured kernels don't support multi-device outputs");
15529 } else {
15530 guard_.reset_device(options.device());
15531 }
15532 outputs_[output_idx] = create_out(sizes, strides, options);
15533 if (!names.empty()) {
15534 namedinference::propagate_names(*outputs_[output_idx], names);
15535 }
15536 // super must happen after, so that downstream can use maybe_get_output
15537 // to retrieve the output
15538 at::meta::structured_elu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15539 }
15540 void set_output_raw_strided(
15541 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15542 TensorOptions options, DimnameList names
15543 ) override {
15544 auto current_device = guard_.current_device();
15545 if (C10_UNLIKELY(current_device.has_value())) {
15546 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15547 "structured kernels don't support multi-device outputs");
15548 } else {
15549 guard_.reset_device(options.device());
15550 }
15551 outputs_[output_idx] = create_out(sizes, strides, options);
15552 if (!names.empty()) {
15553 namedinference::propagate_names(*outputs_[output_idx], names);
15554 }
15555 // super must happen after, so that downstream can use maybe_get_output
15556 // to retrieve the output
15557 at::meta::structured_elu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15558 }
15559 const Tensor& maybe_get_output(int64_t output_idx) override {
15560 return *outputs_[output_idx];
15561 }
15562 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15563 c10::OptionalDeviceGuard guard_;
15564};
15565at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_elu(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
15566structured_elu_default_backend_functional op;
15567op.meta(self, alpha, scale, input_scale);
15568at::elu_outf(self, alpha, scale, input_scale, *op.outputs_[0]);
15569return std::move(op.outputs_[0]).take();
15570}
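// Note on the *_inplace helpers that follow: instead of owning a fresh
// tensor, outputs_ holds a std::reference_wrapper to the caller's tensor.
// check_inplace() verifies that the existing tensor is compatible with the
// computed sizes/options, and maybe_create_proxy() may allocate a temporary
// proxy when the in-place output cannot be written directly; in that case the
// wrapper copies the proxy back into self after the out= call (see the
// copy_() in the wrapper function below).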
15571struct structured_elu_default_backend_inplace final : public at::meta::structured_elu {
15572 structured_elu_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15573 void set_output_strided(
15574 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15575 TensorOptions options, DimnameList names
15576 ) override {
15577 auto current_device = guard_.current_device();
15578 if (C10_UNLIKELY(current_device.has_value())) {
15579 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15580 "structured kernels don't support multi-device outputs");
15581 } else {
15582 guard_.reset_device(options.device());
15583 }
15584 const auto& out = outputs_[output_idx].get();
15585 check_inplace(out, sizes, options);
15586 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15587 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15588 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15589 }
15590 if (!names.empty()) {
15591 namedinference::propagate_names(outputs_[output_idx], names);
15592 }
15593 // super must happen after, so that downstream can use maybe_get_output
15594 // to retrieve the output
15595 at::meta::structured_elu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15596 }
15597 void set_output_raw_strided(
15598 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15599 TensorOptions options, DimnameList names
15600 ) override {
15601 auto current_device = guard_.current_device();
15602 if (C10_UNLIKELY(current_device.has_value())) {
15603 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15604 "structured kernels don't support multi-device outputs");
15605 } else {
15606 guard_.reset_device(options.device());
15607 }
15608 const auto& out = outputs_[output_idx].get();
15609 check_inplace(out, sizes, options);
15610 if (!names.empty()) {
15611 namedinference::propagate_names(outputs_[output_idx], names);
15612 }
15613 // super must happen after, so that downstream can use maybe_get_output
15614 // to retrieve the output
15615 at::meta::structured_elu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15616 }
15617 const Tensor& maybe_get_output(int64_t output_idx) override {
15618 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15619 }
15620 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15621 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15622 c10::OptionalDeviceGuard guard_;
15623};
15624at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_elu_(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
15625structured_elu_default_backend_inplace op(self);
15626op.meta(self, alpha, scale, input_scale);
15627at::elu_outf(self, alpha, scale, input_scale, op.outputs_[0]);
15628if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15629return self;
15630}
15631struct structured_elu_backward_default_backend_functional final : public at::meta::structured_elu_backward {
15632 void set_output_strided(
15633 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15634 TensorOptions options, DimnameList names
15635 ) override {
15636 auto current_device = guard_.current_device();
15637 if (C10_UNLIKELY(current_device.has_value())) {
15638 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15639 "structured kernels don't support multi-device outputs");
15640 } else {
15641 guard_.reset_device(options.device());
15642 }
15643 outputs_[output_idx] = create_out(sizes, strides, options);
15644 if (!names.empty()) {
15645 namedinference::propagate_names(*outputs_[output_idx], names);
15646 }
15647 // super must happen after, so that downstream can use maybe_get_output
15648 // to retrieve the output
15649 at::meta::structured_elu_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
15650 }
15651 void set_output_raw_strided(
15652 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15653 TensorOptions options, DimnameList names
15654 ) override {
15655 auto current_device = guard_.current_device();
15656 if (C10_UNLIKELY(current_device.has_value())) {
15657 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15658 "structured kernels don't support multi-device outputs");
15659 } else {
15660 guard_.reset_device(options.device());
15661 }
15662 outputs_[output_idx] = create_out(sizes, strides, options);
15663 if (!names.empty()) {
15664 namedinference::propagate_names(*outputs_[output_idx], names);
15665 }
15666 // super must happen after, so that downstream can use maybe_get_output
15667 // to retrieve the output
15668 at::meta::structured_elu_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
15669 }
15670 const Tensor& maybe_get_output(int64_t output_idx) override {
15671 return *outputs_[output_idx];
15672 }
15673 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15674 c10::OptionalDeviceGuard guard_;
15675};
15676at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
15677structured_elu_backward_default_backend_functional op;
15678op.meta(grad_output, alpha, scale, input_scale, is_result, self_or_result);
15679at::elu_backward_outf(grad_output, alpha, scale, input_scale, is_result, self_or_result, *op.outputs_[0]);
15680return std::move(op.outputs_[0]).take();
15681}
15682struct structured_glu_default_backend_functional final : public at::meta::structured_glu {
15683 void set_output_strided(
15684 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15685 TensorOptions options, DimnameList names
15686 ) override {
15687 auto current_device = guard_.current_device();
15688 if (C10_UNLIKELY(current_device.has_value())) {
15689 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15690 "structured kernels don't support multi-device outputs");
15691 } else {
15692 guard_.reset_device(options.device());
15693 }
15694 outputs_[output_idx] = create_out(sizes, strides, options);
15695 if (!names.empty()) {
15696 namedinference::propagate_names(*outputs_[output_idx], names);
15697 }
15698 // super must happen after, so that downstream can use maybe_get_output
15699 // to retrieve the output
15700 at::meta::structured_glu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15701 }
15702 void set_output_raw_strided(
15703 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15704 TensorOptions options, DimnameList names
15705 ) override {
15706 auto current_device = guard_.current_device();
15707 if (C10_UNLIKELY(current_device.has_value())) {
15708 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15709 "structured kernels don't support multi-device outputs");
15710 } else {
15711 guard_.reset_device(options.device());
15712 }
15713 outputs_[output_idx] = create_out(sizes, strides, options);
15714 if (!names.empty()) {
15715 namedinference::propagate_names(*outputs_[output_idx], names);
15716 }
15717 // super must happen after, so that downstream can use maybe_get_output
15718 // to retrieve the output
15719 at::meta::structured_glu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15720 }
15721 const Tensor& maybe_get_output(int64_t output_idx) override {
15722 return *outputs_[output_idx];
15723 }
15724 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15725 c10::OptionalDeviceGuard guard_;
15726};
15727at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_glu(const at::Tensor & self, int64_t dim) {
15728structured_glu_default_backend_functional op;
15729op.meta(self, dim);
15730at::glu_outf(self, dim, *op.outputs_[0]);
15731return std::move(op.outputs_[0]).take();
15732}
15733struct structured_hardsigmoid_default_backend_functional final : public at::meta::structured_hardsigmoid {
15734 void set_output_strided(
15735 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15736 TensorOptions options, DimnameList names
15737 ) override {
15738 auto current_device = guard_.current_device();
15739 if (C10_UNLIKELY(current_device.has_value())) {
15740 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15741 "structured kernels don't support multi-device outputs");
15742 } else {
15743 guard_.reset_device(options.device());
15744 }
15745 outputs_[output_idx] = create_out(sizes, strides, options);
15746 if (!names.empty()) {
15747 namedinference::propagate_names(*outputs_[output_idx], names);
15748 }
15749 // super must happen after, so that downstream can use maybe_get_output
15750 // to retrieve the output
15751 at::meta::structured_hardsigmoid::set_output_raw_strided(output_idx, sizes, strides, options, names);
15752 }
15753 void set_output_raw_strided(
15754 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15755 TensorOptions options, DimnameList names
15756 ) override {
15757 auto current_device = guard_.current_device();
15758 if (C10_UNLIKELY(current_device.has_value())) {
15759 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15760 "structured kernels don't support multi-device outputs");
15761 } else {
15762 guard_.reset_device(options.device());
15763 }
15764 outputs_[output_idx] = create_out(sizes, strides, options);
15765 if (!names.empty()) {
15766 namedinference::propagate_names(*outputs_[output_idx], names);
15767 }
15768 // super must happen after, so that downstream can use maybe_get_output
15769 // to retrieve the output
15770 at::meta::structured_hardsigmoid::set_output_raw_strided(output_idx, sizes, strides, options, names);
15771 }
15772 const Tensor& maybe_get_output(int64_t output_idx) override {
15773 return *outputs_[output_idx];
15774 }
15775 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15776 c10::OptionalDeviceGuard guard_;
15777};
15778at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid(const at::Tensor & self) {
15779structured_hardsigmoid_default_backend_functional op;
15780op.meta(self);
15781at::hardsigmoid_outf(self, *op.outputs_[0]);
15782return std::move(op.outputs_[0]).take();
15783}
15784struct structured_hardsigmoid_default_backend_inplace final : public at::meta::structured_hardsigmoid {
15785 structured_hardsigmoid_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15786 void set_output_strided(
15787 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15788 TensorOptions options, DimnameList names
15789 ) override {
15790 auto current_device = guard_.current_device();
15791 if (C10_UNLIKELY(current_device.has_value())) {
15792 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15793 "structured kernels don't support multi-device outputs");
15794 } else {
15795 guard_.reset_device(options.device());
15796 }
15797 const auto& out = outputs_[output_idx].get();
15798 check_inplace(out, sizes, options);
15799 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15800 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15801 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15802 }
15803 if (!names.empty()) {
15804 namedinference::propagate_names(outputs_[output_idx], names);
15805 }
15806 // super must happen after, so that downstream can use maybe_get_output
15807 // to retrieve the output
15808 at::meta::structured_hardsigmoid::set_output_raw_strided(output_idx, sizes, strides, options, names);
15809 }
15810 void set_output_raw_strided(
15811 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15812 TensorOptions options, DimnameList names
15813 ) override {
15814 auto current_device = guard_.current_device();
15815 if (C10_UNLIKELY(current_device.has_value())) {
15816 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15817 "structured kernels don't support multi-device outputs");
15818 } else {
15819 guard_.reset_device(options.device());
15820 }
15821 const auto& out = outputs_[output_idx].get();
15822 check_inplace(out, sizes, options);
15823 if (!names.empty()) {
15824 namedinference::propagate_names(outputs_[output_idx], names);
15825 }
15826 // super must happen after, so that downstream can use maybe_get_output
15827 // to retrieve the output
15828 at::meta::structured_hardsigmoid::set_output_raw_strided(output_idx, sizes, strides, options, names);
15829 }
15830 const Tensor& maybe_get_output(int64_t output_idx) override {
15831 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15832 }
15833 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15834 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15835 c10::OptionalDeviceGuard guard_;
15836};
15837at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid_(at::Tensor & self) {
15838structured_hardsigmoid_default_backend_inplace op(self);
15839op.meta(self);
15840at::hardsigmoid_outf(self, op.outputs_[0]);
15841if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15842return self;
15843}
15844struct structured_hardsigmoid_backward_default_backend_functional final : public at::meta::structured_hardsigmoid_backward {
15845 void set_output_strided(
15846 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15847 TensorOptions options, DimnameList names
15848 ) override {
15849 auto current_device = guard_.current_device();
15850 if (C10_UNLIKELY(current_device.has_value())) {
15851 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15852 "structured kernels don't support multi-device outputs");
15853 } else {
15854 guard_.reset_device(options.device());
15855 }
15856 outputs_[output_idx] = create_out(sizes, strides, options);
15857 if (!names.empty()) {
15858 namedinference::propagate_names(*outputs_[output_idx], names);
15859 }
15860 // super must happen after, so that downstream can use maybe_get_output
15861 // to retrieve the output
15862 at::meta::structured_hardsigmoid_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
15863 }
15864 void set_output_raw_strided(
15865 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15866 TensorOptions options, DimnameList names
15867 ) override {
15868 auto current_device = guard_.current_device();
15869 if (C10_UNLIKELY(current_device.has_value())) {
15870 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15871 "structured kernels don't support multi-device outputs");
15872 } else {
15873 guard_.reset_device(options.device());
15874 }
15875 outputs_[output_idx] = create_out(sizes, strides, options);
15876 if (!names.empty()) {
15877 namedinference::propagate_names(*outputs_[output_idx], names);
15878 }
15879 // super must happen after, so that downstream can use maybe_get_output
15880 // to retrieve the output
15881 at::meta::structured_hardsigmoid_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
15882 }
15883 const Tensor& maybe_get_output(int64_t output_idx) override {
15884 return *outputs_[output_idx];
15885 }
15886 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15887 c10::OptionalDeviceGuard guard_;
15888};
15889at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) {
15890structured_hardsigmoid_backward_default_backend_functional op;
15891op.meta(grad_output, self);
15892at::hardsigmoid_backward_outf(grad_output, self, *op.outputs_[0]);
15893return std::move(op.outputs_[0]).take();
15894}
15895struct structured_leaky_relu_default_backend_functional final : public at::meta::structured_leaky_relu {
15896 void set_output_strided(
15897 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15898 TensorOptions options, DimnameList names
15899 ) override {
15900 auto current_device = guard_.current_device();
15901 if (C10_UNLIKELY(current_device.has_value())) {
15902 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15903 "structured kernels don't support multi-device outputs");
15904 } else {
15905 guard_.reset_device(options.device());
15906 }
15907 outputs_[output_idx] = create_out(sizes, strides, options);
15908 if (!names.empty()) {
15909 namedinference::propagate_names(*outputs_[output_idx], names);
15910 }
15911 // super must happen after, so that downstream can use maybe_get_output
15912 // to retrieve the output
15913 at::meta::structured_leaky_relu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15914 }
15915 void set_output_raw_strided(
15916 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15917 TensorOptions options, DimnameList names
15918 ) override {
15919 auto current_device = guard_.current_device();
15920 if (C10_UNLIKELY(current_device.has_value())) {
15921 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15922 "structured kernels don't support multi-device outputs");
15923 } else {
15924 guard_.reset_device(options.device());
15925 }
15926 outputs_[output_idx] = create_out(sizes, strides, options);
15927 if (!names.empty()) {
15928 namedinference::propagate_names(*outputs_[output_idx], names);
15929 }
15930 // super must happen after, so that downstream can use maybe_get_output
15931 // to retrieve the output
15932 at::meta::structured_leaky_relu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15933 }
15934 const Tensor& maybe_get_output(int64_t output_idx) override {
15935 return *outputs_[output_idx];
15936 }
15937 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15938 c10::OptionalDeviceGuard guard_;
15939};
15940at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
15941structured_leaky_relu_default_backend_functional op;
15942op.meta(self, negative_slope);
15943at::leaky_relu_outf(self, negative_slope, *op.outputs_[0]);
15944return std::move(op.outputs_[0]).take();
15945}
15946struct structured_leaky_relu_default_backend_inplace final : public at::meta::structured_leaky_relu {
15947 structured_leaky_relu_default_backend_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15948 void set_output_strided(
15949 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15950 TensorOptions options, DimnameList names
15951 ) override {
15952 auto current_device = guard_.current_device();
15953 if (C10_UNLIKELY(current_device.has_value())) {
15954 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15955 "structured kernels don't support multi-device outputs");
15956 } else {
15957 guard_.reset_device(options.device());
15958 }
15959 const auto& out = outputs_[output_idx].get();
15960 check_inplace(out, sizes, options);
15961 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15962 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15963 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15964 }
15965 if (!names.empty()) {
15966 namedinference::propagate_names(outputs_[output_idx], names);
15967 }
15968 // super must happen after, so that downstream can use maybe_get_output
15969 // to retrieve the output
15970 at::meta::structured_leaky_relu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15971 }
15972 void set_output_raw_strided(
15973 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15974 TensorOptions options, DimnameList names
15975 ) override {
15976 auto current_device = guard_.current_device();
15977 if (C10_UNLIKELY(current_device.has_value())) {
15978 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
15979 "structured kernels don't support multi-device outputs");
15980 } else {
15981 guard_.reset_device(options.device());
15982 }
15983 const auto& out = outputs_[output_idx].get();
15984 check_inplace(out, sizes, options);
15985 if (!names.empty()) {
15986 namedinference::propagate_names(outputs_[output_idx], names);
15987 }
15988 // super must happen after, so that downstream can use maybe_get_output
15989 // to retrieve the output
15990 at::meta::structured_leaky_relu::set_output_raw_strided(output_idx, sizes, strides, options, names);
15991 }
15992 const Tensor& maybe_get_output(int64_t output_idx) override {
15993 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15994 }
15995 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15996 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15997 c10::OptionalDeviceGuard guard_;
15998};
15999at::Tensor & wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) {
16000structured_leaky_relu_default_backend_inplace op(self);
16001op.meta(self, negative_slope);
16002at::leaky_relu_outf(self, negative_slope, op.outputs_[0]);
16003if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16004return self;
16005}
16006struct structured_leaky_relu_backward_default_backend_functional final : public at::meta::structured_leaky_relu_backward {
16007 void set_output_strided(
16008 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16009 TensorOptions options, DimnameList names
16010 ) override {
16011 auto current_device = guard_.current_device();
16012 if (C10_UNLIKELY(current_device.has_value())) {
16013 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16014 "structured kernels don't support multi-device outputs");
16015 } else {
16016 guard_.reset_device(options.device());
16017 }
16018 outputs_[output_idx] = create_out(sizes, strides, options);
16019 if (!names.empty()) {
16020 namedinference::propagate_names(*outputs_[output_idx], names);
16021 }
16022 // super must happen after, so that downstream can use maybe_get_output
16023 // to retrieve the output
16024 at::meta::structured_leaky_relu_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
16025 }
16026 void set_output_raw_strided(
16027 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16028 TensorOptions options, DimnameList names
16029 ) override {
16030 auto current_device = guard_.current_device();
16031 if (C10_UNLIKELY(current_device.has_value())) {
16032 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16033 "structured kernels don't support multi-device outputs");
16034 } else {
16035 guard_.reset_device(options.device());
16036 }
16037 outputs_[output_idx] = create_out(sizes, strides, options);
16038 if (!names.empty()) {
16039 namedinference::propagate_names(*outputs_[output_idx], names);
16040 }
16041 // super must happen after, so that downstream can use maybe_get_output
16042 // to retrieve the output
16043 at::meta::structured_leaky_relu_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
16044 }
16045 const Tensor& maybe_get_output(int64_t output_idx) override {
16046 return *outputs_[output_idx];
16047 }
16048 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16049 c10::OptionalDeviceGuard guard_;
16050};
16051at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
16052structured_leaky_relu_backward_default_backend_functional op;
16053op.meta(grad_output, self, negative_slope, self_is_result);
16054at::leaky_relu_backward_outf(grad_output, self, negative_slope, self_is_result, *op.outputs_[0]);
16055return std::move(op.outputs_[0]).take();
16056}
16057struct structured_softplus_default_backend_functional final : public at::meta::structured_softplus {
16058 void set_output_strided(
16059 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16060 TensorOptions options, DimnameList names
16061 ) override {
16062 auto current_device = guard_.current_device();
16063 if (C10_UNLIKELY(current_device.has_value())) {
16064 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16065 "structured kernels don't support multi-device outputs");
16066 } else {
16067 guard_.reset_device(options.device());
16068 }
16069 outputs_[output_idx] = create_out(sizes, strides, options);
16070 if (!names.empty()) {
16071 namedinference::propagate_names(*outputs_[output_idx], names);
16072 }
16073 // super must happen after, so that downstream can use maybe_get_output
16074 // to retrieve the output
16075 at::meta::structured_softplus::set_output_raw_strided(output_idx, sizes, strides, options, names);
16076 }
16077 void set_output_raw_strided(
16078 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16079 TensorOptions options, DimnameList names
16080 ) override {
16081 auto current_device = guard_.current_device();
16082 if (C10_UNLIKELY(current_device.has_value())) {
16083 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16084 "structured kernels don't support multi-device outputs");
16085 } else {
16086 guard_.reset_device(options.device());
16087 }
16088 outputs_[output_idx] = create_out(sizes, strides, options);
16089 if (!names.empty()) {
16090 namedinference::propagate_names(*outputs_[output_idx], names);
16091 }
16092 // super must happen after, so that downstream can use maybe_get_output
16093 // to retrieve the output
16094 at::meta::structured_softplus::set_output_raw_strided(output_idx, sizes, strides, options, names);
16095 }
16096 const Tensor& maybe_get_output(int64_t output_idx) override {
16097 return *outputs_[output_idx];
16098 }
16099 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16100 c10::OptionalDeviceGuard guard_;
16101};
16102at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_softplus(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
16103structured_softplus_default_backend_functional op;
16104op.meta(self, beta, threshold);
16105at::softplus_outf(self, beta, threshold, *op.outputs_[0]);
16106return std::move(op.outputs_[0]).take();
16107}
16108struct structured_softplus_backward_default_backend_functional final : public at::meta::structured_softplus_backward {
16109 void set_output_strided(
16110 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16111 TensorOptions options, DimnameList names
16112 ) override {
16113 auto current_device = guard_.current_device();
16114 if (C10_UNLIKELY(current_device.has_value())) {
16115 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16116 "structured kernels don't support multi-device outputs");
16117 } else {
16118 guard_.reset_device(options.device());
16119 }
16120 outputs_[output_idx] = create_out(sizes, strides, options);
16121 if (!names.empty()) {
16122 namedinference::propagate_names(*outputs_[output_idx], names);
16123 }
16124 // super must happen after, so that downstream can use maybe_get_output
16125 // to retrieve the output
16126 at::meta::structured_softplus_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
16127 }
16128 void set_output_raw_strided(
16129 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16130 TensorOptions options, DimnameList names
16131 ) override {
16132 auto current_device = guard_.current_device();
16133 if (C10_UNLIKELY(current_device.has_value())) {
16134 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16135 "structured kernels don't support multi-device outputs");
16136 } else {
16137 guard_.reset_device(options.device());
16138 }
16139 outputs_[output_idx] = create_out(sizes, strides, options);
16140 if (!names.empty()) {
16141 namedinference::propagate_names(*outputs_[output_idx], names);
16142 }
16143 // super must happen after, so that downstream can use maybe_get_output
16144 // to retrieve the output
16145 at::meta::structured_softplus_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
16146 }
16147 const Tensor& maybe_get_output(int64_t output_idx) override {
16148 return *outputs_[output_idx];
16149 }
16150 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16151 c10::OptionalDeviceGuard guard_;
16152};
16153at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
16154structured_softplus_backward_default_backend_functional op;
16155op.meta(grad_output, self, beta, threshold);
16156at::softplus_backward_outf(grad_output, self, beta, threshold, *op.outputs_[0]);
16157return std::move(op.outputs_[0]).take();
16158}
16159struct structured_softshrink_default_backend_functional final : public at::meta::structured_softshrink {
16160 void set_output_strided(
16161 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16162 TensorOptions options, DimnameList names
16163 ) override {
16164 auto current_device = guard_.current_device();
16165 if (C10_UNLIKELY(current_device.has_value())) {
16166 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16167 "structured kernels don't support multi-device outputs");
16168 } else {
16169 guard_.reset_device(options.device());
16170 }
16171 outputs_[output_idx] = create_out(sizes, strides, options);
16172 if (!names.empty()) {
16173 namedinference::propagate_names(*outputs_[output_idx], names);
16174 }
16175 // super must happen after, so that downstream can use maybe_get_output
16176 // to retrieve the output
16177 at::meta::structured_softshrink::set_output_raw_strided(output_idx, sizes, strides, options, names);
16178 }
16179 void set_output_raw_strided(
16180 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16181 TensorOptions options, DimnameList names
16182 ) override {
16183 auto current_device = guard_.current_device();
16184 if (C10_UNLIKELY(current_device.has_value())) {
16185 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16186 "structured kernels don't support multi-device outputs");
16187 } else {
16188 guard_.reset_device(options.device());
16189 }
16190 outputs_[output_idx] = create_out(sizes, strides, options);
16191 if (!names.empty()) {
16192 namedinference::propagate_names(*outputs_[output_idx], names);
16193 }
16194 // super must happen after, so that downstream can use maybe_get_output
16195 // to retrieve the output
16196 at::meta::structured_softshrink::set_output_raw_strided(output_idx, sizes, strides, options, names);
16197 }
16198 const Tensor& maybe_get_output(int64_t output_idx) override {
16199 return *outputs_[output_idx];
16200 }
16201 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16202 c10::OptionalDeviceGuard guard_;
16203};
16204at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_softshrink(const at::Tensor & self, const at::Scalar & lambd) {
16205structured_softshrink_default_backend_functional op;
16206op.meta(self, lambd);
16207at::softshrink_outf(self, lambd, *op.outputs_[0]);
16208return std::move(op.outputs_[0]).take();
16209}
16210struct structured_softshrink_backward_default_backend_functional final : public at::meta::structured_softshrink_backward {
16211 void set_output_strided(
16212 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16213 TensorOptions options, DimnameList names
16214 ) override {
16215 auto current_device = guard_.current_device();
16216 if (C10_UNLIKELY(current_device.has_value())) {
16217 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16218 "structured kernels don't support multi-device outputs");
16219 } else {
16220 guard_.reset_device(options.device());
16221 }
16222 outputs_[output_idx] = create_out(sizes, strides, options);
16223 if (!names.empty()) {
16224 namedinference::propagate_names(*outputs_[output_idx], names);
16225 }
16226 // super must happen after, so that downstream can use maybe_get_output
16227 // to retrieve the output
16228 at::meta::structured_softshrink_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
16229 }
16230 void set_output_raw_strided(
16231 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16232 TensorOptions options, DimnameList names
16233 ) override {
16234 auto current_device = guard_.current_device();
16235 if (C10_UNLIKELY(current_device.has_value())) {
16236 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16237 "structured kernels don't support multi-device outputs");
16238 } else {
16239 guard_.reset_device(options.device());
16240 }
16241 outputs_[output_idx] = create_out(sizes, strides, options);
16242 if (!names.empty()) {
16243 namedinference::propagate_names(*outputs_[output_idx], names);
16244 }
16245 // super must happen after, so that downstream can use maybe_get_output
16246 // to retrieve the output
16247 at::meta::structured_softshrink_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
16248 }
16249 const Tensor& maybe_get_output(int64_t output_idx) override {
16250 return *outputs_[output_idx];
16251 }
16252 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16253 c10::OptionalDeviceGuard guard_;
16254};
16255at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
16256structured_softshrink_backward_default_backend_functional op;
16257op.meta(grad_output, self, lambd);
16258at::softshrink_backward_outf(grad_output, self, lambd, *op.outputs_[0]);
16259return std::move(op.outputs_[0]).take();
16260}
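// Note on the pooling helpers that follow: ops such as adaptive_max_pool2d
// produce two outputs (values and indices), so outputs_ has two elements and
// the wrapper returns a std::tuple of both. Also note that these set_output_*
// overrides do not forward to the at::meta base class: the generator keeps
// the "super must happen after" comment but emits no call here, presumably
// because these meta classes have nothing to observe about the outputs.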
16261struct structured_adaptive_max_pool2d_default_backend_functional final : public at::meta::structured_adaptive_max_pool2d {
16262 void set_output_strided(
16263 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16264 TensorOptions options, DimnameList names
16265 ) override {
16266 auto current_device = guard_.current_device();
16267 if (C10_UNLIKELY(current_device.has_value())) {
16268 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16269 "structured kernels don't support multi-device outputs");
16270 } else {
16271 guard_.reset_device(options.device());
16272 }
16273 outputs_[output_idx] = create_out(sizes, strides, options);
16274 if (!names.empty()) {
16275 namedinference::propagate_names(*outputs_[output_idx], names);
16276 }
16277 // super must happen after, so that downstream can use maybe_get_output
16278 // to retrieve the output
16279 }
16280 void set_output_raw_strided(
16281 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16282 TensorOptions options, DimnameList names
16283 ) override {
16284 auto current_device = guard_.current_device();
16285 if (C10_UNLIKELY(current_device.has_value())) {
16286 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16287 "structured kernels don't support multi-device outputs");
16288 } else {
16289 guard_.reset_device(options.device());
16290 }
16291 outputs_[output_idx] = create_out(sizes, strides, options);
16292 if (!names.empty()) {
16293 namedinference::propagate_names(*outputs_[output_idx], names);
16294 }
16295 // super must happen after, so that downstream can use maybe_get_output
16296 // to retrieve the output
16297 }
16298 const Tensor& maybe_get_output(int64_t output_idx) override {
16299 return *outputs_[output_idx];
16300 }
16301 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
16302 c10::OptionalDeviceGuard guard_;
16303};
16304::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
16305structured_adaptive_max_pool2d_default_backend_functional op;
16306op.meta(self, output_size);
16307at::adaptive_max_pool2d_outf(self, output_size, *op.outputs_[0], *op.outputs_[1]);
16308return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
16309}
16310struct structured_adaptive_max_pool2d_backward_default_backend_functional final : public at::meta::structured_adaptive_max_pool2d_backward {
16311 void set_output_strided(
16312 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16313 TensorOptions options, DimnameList names
16314 ) override {
16315 auto current_device = guard_.current_device();
16316 if (C10_UNLIKELY(current_device.has_value())) {
16317 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16318 "structured kernels don't support multi-device outputs");
16319 } else {
16320 guard_.reset_device(options.device());
16321 }
16322 outputs_[output_idx] = create_out(sizes, strides, options);
16323 if (!names.empty()) {
16324 namedinference::propagate_names(*outputs_[output_idx], names);
16325 }
16326 // super must happen after, so that downstream can use maybe_get_output
16327 // to retrieve the output
16328 }
16329 void set_output_raw_strided(
16330 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16331 TensorOptions options, DimnameList names
16332 ) override {
16333 auto current_device = guard_.current_device();
16334 if (C10_UNLIKELY(current_device.has_value())) {
16335 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16336 "structured kernels don't support multi-device outputs");
16337 } else {
16338 guard_.reset_device(options.device());
16339 }
16340 outputs_[output_idx] = create_out(sizes, strides, options);
16341 if (!names.empty()) {
16342 namedinference::propagate_names(*outputs_[output_idx], names);
16343 }
16344 // super must happen after, so that downstream can use maybe_get_output
16345 // to retrieve the output
16346 }
16347 const Tensor& maybe_get_output(int64_t output_idx) override {
16348 return *outputs_[output_idx];
16349 }
16350 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16351 c10::OptionalDeviceGuard guard_;
16352};
16353at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
16354structured_adaptive_max_pool2d_backward_default_backend_functional op;
16355op.meta(grad_output, self, indices);
16356at::adaptive_max_pool2d_backward_outf(grad_output, self, indices, *op.outputs_[0]);
16357return std::move(op.outputs_[0]).take();
16358}
16359struct structured_adaptive_max_pool3d_default_backend_functional final : public at::meta::structured_adaptive_max_pool3d {
16360 void set_output_strided(
16361 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16362 TensorOptions options, DimnameList names
16363 ) override {
16364 auto current_device = guard_.current_device();
16365 if (C10_UNLIKELY(current_device.has_value())) {
16366 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16367 "structured kernels don't support multi-device outputs");
16368 } else {
16369 guard_.reset_device(options.device());
16370 }
16371 outputs_[output_idx] = create_out(sizes, strides, options);
16372 if (!names.empty()) {
16373 namedinference::propagate_names(*outputs_[output_idx], names);
16374 }
16375 // super must happen after, so that downstream can use maybe_get_output
16376 // to retrieve the output
16377 }
16378 void set_output_raw_strided(
16379 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16380 TensorOptions options, DimnameList names
16381 ) override {
16382 auto current_device = guard_.current_device();
16383 if (C10_UNLIKELY(current_device.has_value())) {
16384 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16385 "structured kernels don't support multi-device outputs");
16386 } else {
16387 guard_.reset_device(options.device());
16388 }
16389 outputs_[output_idx] = create_out(sizes, strides, options);
16390 if (!names.empty()) {
16391 namedinference::propagate_names(*outputs_[output_idx], names);
16392 }
16393 // super must happen after, so that downstream can use maybe_get_output
16394 // to retrieve the output
16395 }
16396 const Tensor& maybe_get_output(int64_t output_idx) override {
16397 return *outputs_[output_idx];
16398 }
16399 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
16400 c10::OptionalDeviceGuard guard_;
16401};
16402::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
16403structured_adaptive_max_pool3d_default_backend_functional op;
16404op.meta(self, output_size);
16405at::adaptive_max_pool3d_outf(self, output_size, *op.outputs_[0], *op.outputs_[1]);
16406return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
16407}
16408struct structured_adaptive_max_pool3d_backward_default_backend_functional final : public at::meta::structured_adaptive_max_pool3d_backward {
16409 void set_output_strided(
16410 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16411 TensorOptions options, DimnameList names
16412 ) override {
16413 auto current_device = guard_.current_device();
16414 if (C10_UNLIKELY(current_device.has_value())) {
16415 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16416 "structured kernels don't support multi-device outputs");
16417 } else {
16418 guard_.reset_device(options.device());
16419 }
16420 outputs_[output_idx] = create_out(sizes, strides, options);
16421 if (!names.empty()) {
16422 namedinference::propagate_names(*outputs_[output_idx], names);
16423 }
16424 // super must happen after, so that downstream can use maybe_get_output
16425 // to retrieve the output
16426 }
16427 void set_output_raw_strided(
16428 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16429 TensorOptions options, DimnameList names
16430 ) override {
16431 auto current_device = guard_.current_device();
16432 if (C10_UNLIKELY(current_device.has_value())) {
16433 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16434 "structured kernels don't support multi-device outputs");
16435 } else {
16436 guard_.reset_device(options.device());
16437 }
16438 outputs_[output_idx] = create_out(sizes, strides, options);
16439 if (!names.empty()) {
16440 namedinference::propagate_names(*outputs_[output_idx], names);
16441 }
16442 // super must happen after, so that downstream can use maybe_get_output
16443 // to retrieve the output
16444 }
16445 const Tensor& maybe_get_output(int64_t output_idx) override {
16446 return *outputs_[output_idx];
16447 }
16448 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16449 c10::OptionalDeviceGuard guard_;
16450};
16451at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
16452structured_adaptive_max_pool3d_backward_default_backend_functional op;
16453op.meta(grad_output, self, indices);
16454at::adaptive_max_pool3d_backward_outf(grad_output, self, indices, *op.outputs_[0]);
16455return std::move(op.outputs_[0]).take();
16456}
16457struct structured_avg_pool2d_default_backend_functional final : public at::meta::structured_avg_pool2d {
16458 void set_output_strided(
16459 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16460 TensorOptions options, DimnameList names
16461 ) override {
16462 auto current_device = guard_.current_device();
16463 if (C10_UNLIKELY(current_device.has_value())) {
16464 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16465 "structured kernels don't support multi-device outputs");
16466 } else {
16467 guard_.reset_device(options.device());
16468 }
16469 outputs_[output_idx] = create_out(sizes, strides, options);
16470 if (!names.empty()) {
16471 namedinference::propagate_names(*outputs_[output_idx], names);
16472 }
16473 // super must happen after, so that downstream can use maybe_get_output
16474 // to retrieve the output
16475 }
16476 void set_output_raw_strided(
16477 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16478 TensorOptions options, DimnameList names
16479 ) override {
16480 auto current_device = guard_.current_device();
16481 if (C10_UNLIKELY(current_device.has_value())) {
16482 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16483 "structured kernels don't support multi-device outputs");
16484 } else {
16485 guard_.reset_device(options.device());
16486 }
16487 outputs_[output_idx] = create_out(sizes, strides, options);
16488 if (!names.empty()) {
16489 namedinference::propagate_names(*outputs_[output_idx], names);
16490 }
16491 // super must happen after, so that downstream can use maybe_get_output
16492 // to retrieve the output
16493 }
16494 const Tensor& maybe_get_output(int64_t output_idx) override {
16495 return *outputs_[output_idx];
16496 }
16497 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16498 c10::OptionalDeviceGuard guard_;
16499};
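// Note: some meta() functions (e.g. avg_pool2d, fractional_max_pool3d) return
// a struct of precomputed arguments. The functional wrappers discard it via
// (void)precompute, presumably because the subsequent out= redispatch
// recomputes whatever it needs internally.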
16500at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
16501structured_avg_pool2d_default_backend_functional op;
16502auto precompute = op.meta(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
16503(void)precompute;
16504at::avg_pool2d_outf(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, *op.outputs_[0]);
16505return std::move(op.outputs_[0]).take();
16506}
16507struct structured_avg_pool2d_backward_default_backend_functional final : public at::meta::structured_avg_pool2d_backward {
16508 void set_output_strided(
16509 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16510 TensorOptions options, DimnameList names
16511 ) override {
16512 auto current_device = guard_.current_device();
16513 if (C10_UNLIKELY(current_device.has_value())) {
16514 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16515 "structured kernels don't support multi-device outputs");
16516 } else {
16517 guard_.reset_device(options.device());
16518 }
16519 outputs_[output_idx] = create_out(sizes, strides, options);
16520 if (!names.empty()) {
16521 namedinference::propagate_names(*outputs_[output_idx], names);
16522 }
16523 // super must happen after, so that downstream can use maybe_get_output
16524 // to retrieve the output
16525 }
16526 void set_output_raw_strided(
16527 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16528 TensorOptions options, DimnameList names
16529 ) override {
16530 auto current_device = guard_.current_device();
16531 if (C10_UNLIKELY(current_device.has_value())) {
16532 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16533 "structured kernels don't support multi-device outputs");
16534 } else {
16535 guard_.reset_device(options.device());
16536 }
16537 outputs_[output_idx] = create_out(sizes, strides, options);
16538 if (!names.empty()) {
16539 namedinference::propagate_names(*outputs_[output_idx], names);
16540 }
16541 // super must happen after, so that downstream can use maybe_get_output
16542 // to retrieve the output
16543 }
16544 const Tensor& maybe_get_output(int64_t output_idx) override {
16545 return *outputs_[output_idx];
16546 }
16547 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16548 c10::OptionalDeviceGuard guard_;
16549};
16550at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
16551structured_avg_pool2d_backward_default_backend_functional op;
16552op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
16553at::avg_pool2d_backward_outf(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, *op.outputs_[0]);
16554return std::move(op.outputs_[0]).take();
16555}
16556struct structured_avg_pool3d_default_backend_functional final : public at::meta::structured_avg_pool3d {
16557 void set_output_strided(
16558 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16559 TensorOptions options, DimnameList names
16560 ) override {
16561 auto current_device = guard_.current_device();
16562 if (C10_UNLIKELY(current_device.has_value())) {
16563 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16564 "structured kernels don't support multi-device outputs");
16565 } else {
16566 guard_.reset_device(options.device());
16567 }
16568 outputs_[output_idx] = create_out(sizes, strides, options);
16569 if (!names.empty()) {
16570 namedinference::propagate_names(*outputs_[output_idx], names);
16571 }
16572 // super must happen after, so that downstream can use maybe_get_output
16573 // to retrieve the output
16574 }
16575 void set_output_raw_strided(
16576 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16577 TensorOptions options, DimnameList names
16578 ) override {
16579 auto current_device = guard_.current_device();
16580 if (C10_UNLIKELY(current_device.has_value())) {
16581 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16582 "structured kernels don't support multi-device outputs");
16583 } else {
16584 guard_.reset_device(options.device());
16585 }
16586 outputs_[output_idx] = create_out(sizes, strides, options);
16587 if (!names.empty()) {
16588 namedinference::propagate_names(*outputs_[output_idx], names);
16589 }
16590 // super must happen after, so that downstream can use maybe_get_output
16591 // to retrieve the output
16592 }
16593 const Tensor& maybe_get_output(int64_t output_idx) override {
16594 return *outputs_[output_idx];
16595 }
16596 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16597 c10::OptionalDeviceGuard guard_;
16598};
16599at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
16600structured_avg_pool3d_default_backend_functional op;
16601op.meta(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
16602at::avg_pool3d_outf(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, *op.outputs_[0]);
16603return std::move(op.outputs_[0]).take();
16604}
16605struct structured_avg_pool3d_backward_default_backend_functional final : public at::meta::structured_avg_pool3d_backward {
16606 void set_output_strided(
16607 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16608 TensorOptions options, DimnameList names
16609 ) override {
16610 auto current_device = guard_.current_device();
16611 if (C10_UNLIKELY(current_device.has_value())) {
16612 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16613 "structured kernels don't support multi-device outputs");
16614 } else {
16615 guard_.reset_device(options.device());
16616 }
16617 outputs_[output_idx] = create_out(sizes, strides, options);
16618 if (!names.empty()) {
16619 namedinference::propagate_names(*outputs_[output_idx], names);
16620 }
16621 // super must happen after, so that downstream can use maybe_get_output
16622 // to retrieve the output
16623 }
16624 void set_output_raw_strided(
16625 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16626 TensorOptions options, DimnameList names
16627 ) override {
16628 auto current_device = guard_.current_device();
16629 if (C10_UNLIKELY(current_device.has_value())) {
16630 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16631 "structured kernels don't support multi-device outputs");
16632 } else {
16633 guard_.reset_device(options.device());
16634 }
16635 outputs_[output_idx] = create_out(sizes, strides, options);
16636 if (!names.empty()) {
16637 namedinference::propagate_names(*outputs_[output_idx], names);
16638 }
16639 // super must happen after, so that downstream can use maybe_get_output
16640 // to retrieve the output
16641 }
16642 const Tensor& maybe_get_output(int64_t output_idx) override {
16643 return *outputs_[output_idx];
16644 }
16645 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16646 c10::OptionalDeviceGuard guard_;
16647};
16648at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
16649structured_avg_pool3d_backward_default_backend_functional op;
16650op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
16651at::avg_pool3d_backward_outf(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, *op.outputs_[0]);
16652return std::move(op.outputs_[0]).take();
16653}
16654struct structured_fractional_max_pool2d_default_backend_functional final : public at::meta::structured_fractional_max_pool2d {
16655 void set_output_strided(
16656 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16657 TensorOptions options, DimnameList names
16658 ) override {
16659 auto current_device = guard_.current_device();
16660 if (C10_UNLIKELY(current_device.has_value())) {
16661 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16662 "structured kernels don't support multi-device outputs");
16663 } else {
16664 guard_.reset_device(options.device());
16665 }
16666 outputs_[output_idx] = create_out(sizes, strides, options);
16667 if (!names.empty()) {
16668 namedinference::propagate_names(*outputs_[output_idx], names);
16669 }
16670 // super must happen after, so that downstream can use maybe_get_output
16671 // to retrieve the output
16672 }
16673 void set_output_raw_strided(
16674 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16675 TensorOptions options, DimnameList names
16676 ) override {
16677 auto current_device = guard_.current_device();
16678 if (C10_UNLIKELY(current_device.has_value())) {
16679 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16680 "structured kernels don't support multi-device outputs");
16681 } else {
16682 guard_.reset_device(options.device());
16683 }
16684 outputs_[output_idx] = create_out(sizes, strides, options);
16685 if (!names.empty()) {
16686 namedinference::propagate_names(*outputs_[output_idx], names);
16687 }
16688 // super must happen after, so that downstream can use maybe_get_output
16689 // to retrieve the output
16690 }
16691 const Tensor& maybe_get_output(int64_t output_idx) override {
16692 return *outputs_[output_idx];
16693 }
16694 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
16695 c10::OptionalDeviceGuard guard_;
16696};
16697::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
16698structured_fractional_max_pool2d_default_backend_functional op;
16699op.meta(self, kernel_size, output_size, random_samples);
16700at::fractional_max_pool2d_outf(self, kernel_size, output_size, random_samples, *op.outputs_[0], *op.outputs_[1]);
16701return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
16702}
16703struct structured_fractional_max_pool2d_backward_default_backend_functional final : public at::meta::structured_fractional_max_pool2d_backward {
16704 void set_output_strided(
16705 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16706 TensorOptions options, DimnameList names
16707 ) override {
16708 auto current_device = guard_.current_device();
16709 if (C10_UNLIKELY(current_device.has_value())) {
16710 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16711 "structured kernels don't support multi-device outputs");
16712 } else {
16713 guard_.reset_device(options.device());
16714 }
16715 outputs_[output_idx] = create_out(sizes, strides, options);
16716 if (!names.empty()) {
16717 namedinference::propagate_names(*outputs_[output_idx], names);
16718 }
16719 // super must happen after, so that downstream can use maybe_get_output
16720 // to retrieve the output
16721 }
16722 void set_output_raw_strided(
16723 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16724 TensorOptions options, DimnameList names
16725 ) override {
16726 auto current_device = guard_.current_device();
16727 if (C10_UNLIKELY(current_device.has_value())) {
16728 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16729 "structured kernels don't support multi-device outputs");
16730 } else {
16731 guard_.reset_device(options.device());
16732 }
16733 outputs_[output_idx] = create_out(sizes, strides, options);
16734 if (!names.empty()) {
16735 namedinference::propagate_names(*outputs_[output_idx], names);
16736 }
16737 // super must happen after, so that downstream can use maybe_get_output
16738 // to retrieve the output
16739 }
16740 const Tensor& maybe_get_output(int64_t output_idx) override {
16741 return *outputs_[output_idx];
16742 }
16743 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16744 c10::OptionalDeviceGuard guard_;
16745};
16746at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
16747structured_fractional_max_pool2d_backward_default_backend_functional op;
16748op.meta(grad_output, self, kernel_size, output_size, indices);
16749at::fractional_max_pool2d_backward_outf(grad_output, self, kernel_size, output_size, indices, *op.outputs_[0]);
16750return std::move(op.outputs_[0]).take();
16751}
16752struct structured_fractional_max_pool3d_default_backend_functional final : public at::meta::structured_fractional_max_pool3d {
16753 void set_output_strided(
16754 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16755 TensorOptions options, DimnameList names
16756 ) override {
16757 auto current_device = guard_.current_device();
16758 if (C10_UNLIKELY(current_device.has_value())) {
16759 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16760 "structured kernels don't support multi-device outputs");
16761 } else {
16762 guard_.reset_device(options.device());
16763 }
16764 outputs_[output_idx] = create_out(sizes, strides, options);
16765 if (!names.empty()) {
16766 namedinference::propagate_names(*outputs_[output_idx], names);
16767 }
16768 // super must happen after, so that downstream can use maybe_get_output
16769 // to retrieve the output
16770 }
16771 void set_output_raw_strided(
16772 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16773 TensorOptions options, DimnameList names
16774 ) override {
16775 auto current_device = guard_.current_device();
16776 if (C10_UNLIKELY(current_device.has_value())) {
16777 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16778 "structured kernels don't support multi-device outputs");
16779 } else {
16780 guard_.reset_device(options.device());
16781 }
16782 outputs_[output_idx] = create_out(sizes, strides, options);
16783 if (!names.empty()) {
16784 namedinference::propagate_names(*outputs_[output_idx], names);
16785 }
16786 // super must happen after, so that downstream can use maybe_get_output
16787 // to retrieve the output
16788 }
16789 const Tensor& maybe_get_output(int64_t output_idx) override {
16790 return *outputs_[output_idx];
16791 }
16792 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
16793 c10::OptionalDeviceGuard guard_;
16794};
16795::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
16796structured_fractional_max_pool3d_default_backend_functional op;
16797auto precompute = op.meta(self, kernel_size, output_size, random_samples);
16798(void)precompute;
16799at::fractional_max_pool3d_outf(self, kernel_size, output_size, random_samples, *op.outputs_[0], *op.outputs_[1]);
16800return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
16801}
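// NOTE: [Editorial comment] Unlike the 2d wrapper above, this meta() returns
// a precompute struct (values derived from kernel_size/output_size for reuse
// by the implementation). The functional wrapper only needs the outputs it
// allocated, so the return value is bound and immediately discarded with
// (void)precompute to keep unused-variable warnings quiet.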
16802struct structured_max_pool2d_with_indices_default_backend_functional final : public at::meta::structured_max_pool2d_with_indices {
16803 void set_output_strided(
16804 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16805 TensorOptions options, DimnameList names
16806 ) override {
16807 auto current_device = guard_.current_device();
16808 if (C10_UNLIKELY(current_device.has_value())) {
16809 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16810 "structured kernels don't support multi-device outputs");
16811 } else {
16812 guard_.reset_device(options.device());
16813 }
16814 outputs_[output_idx] = create_out(sizes, strides, options);
16815 if (!names.empty()) {
16816 namedinference::propagate_names(*outputs_[output_idx], names);
16817 }
16818 // super must happen after, so that downstream can use maybe_get_output
16819 // to retrieve the output
16820 }
16821 void set_output_raw_strided(
16822 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16823 TensorOptions options, DimnameList names
16824 ) override {
16825 auto current_device = guard_.current_device();
16826 if (C10_UNLIKELY(current_device.has_value())) {
16827 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16828 "structured kernels don't support multi-device outputs");
16829 } else {
16830 guard_.reset_device(options.device());
16831 }
16832 outputs_[output_idx] = create_out(sizes, strides, options);
16833 if (!names.empty()) {
16834 namedinference::propagate_names(*outputs_[output_idx], names);
16835 }
16836 // super must happen after, so that downstream can use maybe_get_output
16837 // to retrieve the output
16838 }
16839 const Tensor& maybe_get_output(int64_t output_idx) override {
16840 return *outputs_[output_idx];
16841 }
16842 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
16843 c10::OptionalDeviceGuard guard_;
16844};
16845::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
16846structured_max_pool2d_with_indices_default_backend_functional op;
16847op.meta(self, kernel_size, stride, padding, dilation, ceil_mode);
16848at::max_pool2d_with_indices_outf(self, kernel_size, stride, padding, dilation, ceil_mode, *op.outputs_[0], *op.outputs_[1]);
16849return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
16850}
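// NOTE: [Editorial sketch, not emitted by torchgen] On this key the wrapper
// allocates both outputs itself and calls the out= overload, so callers never
// supply preallocated tensors. Roughly, assuming <ATen/ATen.h>:
//
//   at::Tensor x = at::rand({1, 1, 8, 8});
//   auto [pooled, idx] = at::max_pool2d_with_indices(
//       x, /*kernel_size=*/{2, 2}, /*stride=*/{2, 2},
//       /*padding=*/{0, 0}, /*dilation=*/{1, 1}, /*ceil_mode=*/false);
//   // pooled has shape {1, 1, 4, 4}; idx records the argmax positions that
//   // max_pool2d_with_indices_backward (below) consumes.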
16851struct structured_max_pool2d_with_indices_backward_default_backend_functional final : public at::meta::structured_max_pool2d_with_indices_backward {
16852 void set_output_strided(
16853 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16854 TensorOptions options, DimnameList names
16855 ) override {
16856 auto current_device = guard_.current_device();
16857 if (C10_UNLIKELY(current_device.has_value())) {
16858 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16859 "structured kernels don't support multi-device outputs");
16860 } else {
16861 guard_.reset_device(options.device());
16862 }
16863 outputs_[output_idx] = create_out(sizes, strides, options);
16864 if (!names.empty()) {
16865 namedinference::propagate_names(*outputs_[output_idx], names);
16866 }
16867 // super must happen after, so that downstream can use maybe_get_output
16868 // to retrieve the output
16869 }
16870 void set_output_raw_strided(
16871 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16872 TensorOptions options, DimnameList names
16873 ) override {
16874 auto current_device = guard_.current_device();
16875 if (C10_UNLIKELY(current_device.has_value())) {
16876 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16877 "structured kernels don't support multi-device outputs");
16878 } else {
16879 guard_.reset_device(options.device());
16880 }
16881 outputs_[output_idx] = create_out(sizes, strides, options);
16882 if (!names.empty()) {
16883 namedinference::propagate_names(*outputs_[output_idx], names);
16884 }
16885 // super must happen after, so that downstream can use maybe_get_output
16886 // to retrieve the output
16887 }
16888 const Tensor& maybe_get_output(int64_t output_idx) override {
16889 return *outputs_[output_idx];
16890 }
16891 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16892 c10::OptionalDeviceGuard guard_;
16893};
16894at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
16895structured_max_pool2d_with_indices_backward_default_backend_functional op;
16896op.meta(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
16897at::max_pool2d_with_indices_backward_outf(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, *op.outputs_[0]);
16898return std::move(op.outputs_[0]).take();
16899}
16900struct structured_reflection_pad1d_default_backend_functional final : public at::meta::structured_reflection_pad1d {
16901 void set_output_strided(
16902 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16903 TensorOptions options, DimnameList names
16904 ) override {
16905 auto current_device = guard_.current_device();
16906 if (C10_UNLIKELY(current_device.has_value())) {
16907 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16908 "structured kernels don't support multi-device outputs");
16909 } else {
16910 guard_.reset_device(options.device());
16911 }
16912 outputs_[output_idx] = create_out(sizes, strides, options);
16913 if (!names.empty()) {
16914 namedinference::propagate_names(*outputs_[output_idx], names);
16915 }
16916 // super must happen after, so that downstream can use maybe_get_output
16917 // to retrieve the output
16918 }
16919 void set_output_raw_strided(
16920 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16921 TensorOptions options, DimnameList names
16922 ) override {
16923 auto current_device = guard_.current_device();
16924 if (C10_UNLIKELY(current_device.has_value())) {
16925 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16926 "structured kernels don't support multi-device outputs");
16927 } else {
16928 guard_.reset_device(options.device());
16929 }
16930 outputs_[output_idx] = create_out(sizes, strides, options);
16931 if (!names.empty()) {
16932 namedinference::propagate_names(*outputs_[output_idx], names);
16933 }
16934 // super must happen after, so that downstream can use maybe_get_output
16935 // to retrieve the output
16936 }
16937 const Tensor& maybe_get_output(int64_t output_idx) override {
16938 return *outputs_[output_idx];
16939 }
16940 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16941 c10::OptionalDeviceGuard guard_;
16942};
16943at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
16944structured_reflection_pad1d_default_backend_functional op;
16945op.meta(self, padding);
16946at::reflection_pad1d_outf(self, padding, *op.outputs_[0]);
16947return std::move(op.outputs_[0]).take();
16948}
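// NOTE: [Editorial sketch, not emitted by torchgen] reflection_pad1d mirrors
// elements across the edges of the last dimension without repeating the edge
// value; the structured meta() above is where the "pad must be smaller than
// the input size" check lives. A small example, assuming <ATen/ATen.h>:
//
//   at::Tensor t = at::arange(4, at::kFloat).reshape({1, 4});  // 0 1 2 3
//   at::Tensor p = at::reflection_pad1d(t, /*padding=*/{2, 1});
//   // p contains 2 1 0 1 2 3 2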
16949struct structured_reflection_pad1d_backward_default_backend_functional final : public at::meta::structured_reflection_pad1d_backward {
16950 void set_output_strided(
16951 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16952 TensorOptions options, DimnameList names
16953 ) override {
16954 auto current_device = guard_.current_device();
16955 if (C10_UNLIKELY(current_device.has_value())) {
16956 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16957 "structured kernels don't support multi-device outputs");
16958 } else {
16959 guard_.reset_device(options.device());
16960 }
16961 outputs_[output_idx] = create_out(sizes, strides, options);
16962 if (!names.empty()) {
16963 namedinference::propagate_names(*outputs_[output_idx], names);
16964 }
16965 // super must happen after, so that downstream can use maybe_get_output
16966 // to retrieve the output
16967 }
16968 void set_output_raw_strided(
16969 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16970 TensorOptions options, DimnameList names
16971 ) override {
16972 auto current_device = guard_.current_device();
16973 if (C10_UNLIKELY(current_device.has_value())) {
16974 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
16975 "structured kernels don't support multi-device outputs");
16976 } else {
16977 guard_.reset_device(options.device());
16978 }
16979 outputs_[output_idx] = create_out(sizes, strides, options);
16980 if (!names.empty()) {
16981 namedinference::propagate_names(*outputs_[output_idx], names);
16982 }
16983 // super must happen after, so that downstream can use maybe_get_output
16984 // to retrieve the output
16985 }
16986 const Tensor& maybe_get_output(int64_t output_idx) override {
16987 return *outputs_[output_idx];
16988 }
16989 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16990 c10::OptionalDeviceGuard guard_;
16991};
16992at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
16993structured_reflection_pad1d_backward_default_backend_functional op;
16994op.meta(grad_output, self, padding);
16995at::reflection_pad1d_backward_outf(grad_output, self, padding, *op.outputs_[0]);
16996return std::move(op.outputs_[0]).take();
16997}
16998struct structured_reflection_pad3d_default_backend_functional final : public at::meta::structured_reflection_pad3d {
16999 void set_output_strided(
17000 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17001 TensorOptions options, DimnameList names
17002 ) override {
17003 auto current_device = guard_.current_device();
17004 if (C10_UNLIKELY(current_device.has_value())) {
17005 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17006 "structured kernels don't support multi-device outputs");
17007 } else {
17008 guard_.reset_device(options.device());
17009 }
17010 outputs_[output_idx] = create_out(sizes, strides, options);
17011 if (!names.empty()) {
17012 namedinference::propagate_names(*outputs_[output_idx], names);
17013 }
17014 // super must happen after, so that downstream can use maybe_get_output
17015 // to retrieve the output
17016 }
17017 void set_output_raw_strided(
17018 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17019 TensorOptions options, DimnameList names
17020 ) override {
17021 auto current_device = guard_.current_device();
17022 if (C10_UNLIKELY(current_device.has_value())) {
17023 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17024 "structured kernels don't support multi-device outputs");
17025 } else {
17026 guard_.reset_device(options.device());
17027 }
17028 outputs_[output_idx] = create_out(sizes, strides, options);
17029 if (!names.empty()) {
17030 namedinference::propagate_names(*outputs_[output_idx], names);
17031 }
17032 // super must happen after, so that downstream can use maybe_get_output
17033 // to retrieve the output
17034 }
17035 const Tensor& maybe_get_output(int64_t output_idx) override {
17036 return *outputs_[output_idx];
17037 }
17038 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17039 c10::OptionalDeviceGuard guard_;
17040};
17041at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
17042structured_reflection_pad3d_default_backend_functional op;
17043op.meta(self, padding);
17044at::reflection_pad3d_outf(self, padding, *op.outputs_[0]);
17045return std::move(op.outputs_[0]).take();
17046}
17047struct structured_reflection_pad3d_backward_default_backend_functional final : public at::meta::structured_reflection_pad3d_backward {
17048 void set_output_strided(
17049 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17050 TensorOptions options, DimnameList names
17051 ) override {
17052 auto current_device = guard_.current_device();
17053 if (C10_UNLIKELY(current_device.has_value())) {
17054 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17055 "structured kernels don't support multi-device outputs");
17056 } else {
17057 guard_.reset_device(options.device());
17058 }
17059 outputs_[output_idx] = create_out(sizes, strides, options);
17060 if (!names.empty()) {
17061 namedinference::propagate_names(*outputs_[output_idx], names);
17062 }
17063 // super must happen after, so that downstream can use maybe_get_output
17064 // to retrieve the output
17065 }
17066 void set_output_raw_strided(
17067 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17068 TensorOptions options, DimnameList names
17069 ) override {
17070 auto current_device = guard_.current_device();
17071 if (C10_UNLIKELY(current_device.has_value())) {
17072 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17073 "structured kernels don't support multi-device outputs");
17074 } else {
17075 guard_.reset_device(options.device());
17076 }
17077 outputs_[output_idx] = create_out(sizes, strides, options);
17078 if (!names.empty()) {
17079 namedinference::propagate_names(*outputs_[output_idx], names);
17080 }
17081 // super must happen after, so that downstream can use maybe_get_output
17082 // to retrieve the output
17083 }
17084 const Tensor& maybe_get_output(int64_t output_idx) override {
17085 return *outputs_[output_idx];
17086 }
17087 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17088 c10::OptionalDeviceGuard guard_;
17089};
17090at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
17091structured_reflection_pad3d_backward_default_backend_functional op;
17092op.meta(grad_output, self, padding);
17093at::reflection_pad3d_backward_outf(grad_output, self, padding, *op.outputs_[0]);
17094return std::move(op.outputs_[0]).take();
17095}
17096struct structured_replication_pad1d_default_backend_functional final : public at::meta::structured_replication_pad1d {
17097 void set_output_strided(
17098 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17099 TensorOptions options, DimnameList names
17100 ) override {
17101 auto current_device = guard_.current_device();
17102 if (C10_UNLIKELY(current_device.has_value())) {
17103 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17104 "structured kernels don't support multi-device outputs");
17105 } else {
17106 guard_.reset_device(options.device());
17107 }
17108 outputs_[output_idx] = create_out(sizes, strides, options);
17109 if (!names.empty()) {
17110 namedinference::propagate_names(*outputs_[output_idx], names);
17111 }
17112 // super must happen after, so that downstream can use maybe_get_output
17113 // to retrieve the output
17114 }
17115 void set_output_raw_strided(
17116 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17117 TensorOptions options, DimnameList names
17118 ) override {
17119 auto current_device = guard_.current_device();
17120 if (C10_UNLIKELY(current_device.has_value())) {
17121 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17122 "structured kernels don't support multi-device outputs");
17123 } else {
17124 guard_.reset_device(options.device());
17125 }
17126 outputs_[output_idx] = create_out(sizes, strides, options);
17127 if (!names.empty()) {
17128 namedinference::propagate_names(*outputs_[output_idx], names);
17129 }
17130 // super must happen after, so that downstream can use maybe_get_output
17131 // to retrieve the output
17132 }
17133 const Tensor& maybe_get_output(int64_t output_idx) override {
17134 return *outputs_[output_idx];
17135 }
17136 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17137 c10::OptionalDeviceGuard guard_;
17138};
17139at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_replication_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
17140structured_replication_pad1d_default_backend_functional op;
17141op.meta(self, padding);
17142at::replication_pad1d_outf(self, padding, *op.outputs_[0]);
17143return std::move(op.outputs_[0]).take();
17144}
17145struct structured_replication_pad1d_backward_default_backend_functional final : public at::meta::structured_replication_pad1d_backward {
17146 void set_output_strided(
17147 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17148 TensorOptions options, DimnameList names
17149 ) override {
17150 auto current_device = guard_.current_device();
17151 if (C10_UNLIKELY(current_device.has_value())) {
17152 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17153 "structured kernels don't support multi-device outputs");
17154 } else {
17155 guard_.reset_device(options.device());
17156 }
17157 outputs_[output_idx] = create_out(sizes, strides, options);
17158 if (!names.empty()) {
17159 namedinference::propagate_names(*outputs_[output_idx], names);
17160 }
17161 // super must happen after, so that downstream can use maybe_get_output
17162 // to retrieve the output
17163 }
17164 void set_output_raw_strided(
17165 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17166 TensorOptions options, DimnameList names
17167 ) override {
17168 auto current_device = guard_.current_device();
17169 if (C10_UNLIKELY(current_device.has_value())) {
17170 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17171 "structured kernels don't support multi-device outputs");
17172 } else {
17173 guard_.reset_device(options.device());
17174 }
17175 outputs_[output_idx] = create_out(sizes, strides, options);
17176 if (!names.empty()) {
17177 namedinference::propagate_names(*outputs_[output_idx], names);
17178 }
17179 // super must happen after, so that downstream can use maybe_get_output
17180 // to retrieve the output
17181 }
17182 const Tensor& maybe_get_output(int64_t output_idx) override {
17183 return *outputs_[output_idx];
17184 }
17185 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17186 c10::OptionalDeviceGuard guard_;
17187};
17188at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
17189structured_replication_pad1d_backward_default_backend_functional op;
17190op.meta(grad_output, self, padding);
17191at::replication_pad1d_backward_outf(grad_output, self, padding, *op.outputs_[0]);
17192return std::move(op.outputs_[0]).take();
17193}
17194struct structured_replication_pad2d_default_backend_functional final : public at::meta::structured_replication_pad2d {
17195 void set_output_strided(
17196 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17197 TensorOptions options, DimnameList names
17198 ) override {
17199 auto current_device = guard_.current_device();
17200 if (C10_UNLIKELY(current_device.has_value())) {
17201 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17202 "structured kernels don't support multi-device outputs");
17203 } else {
17204 guard_.reset_device(options.device());
17205 }
17206 outputs_[output_idx] = create_out(sizes, strides, options);
17207 if (!names.empty()) {
17208 namedinference::propagate_names(*outputs_[output_idx], names);
17209 }
17210 // super must happen after, so that downstream can use maybe_get_output
17211 // to retrieve the output
17212 }
17213 void set_output_raw_strided(
17214 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17215 TensorOptions options, DimnameList names
17216 ) override {
17217 auto current_device = guard_.current_device();
17218 if (C10_UNLIKELY(current_device.has_value())) {
17219 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17220 "structured kernels don't support multi-device outputs");
17221 } else {
17222 guard_.reset_device(options.device());
17223 }
17224 outputs_[output_idx] = create_out(sizes, strides, options);
17225 if (!names.empty()) {
17226 namedinference::propagate_names(*outputs_[output_idx], names);
17227 }
17228 // super must happen after, so that downstream can use maybe_get_output
17229 // to retrieve the output
17230 }
17231 const Tensor& maybe_get_output(int64_t output_idx) override {
17232 return *outputs_[output_idx];
17233 }
17234 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17235 c10::OptionalDeviceGuard guard_;
17236};
17237at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_replication_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
17238structured_replication_pad2d_default_backend_functional op;
17239op.meta(self, padding);
17240at::replication_pad2d_outf(self, padding, *op.outputs_[0]);
17241return std::move(op.outputs_[0]).take();
17242}
17243struct structured_replication_pad3d_default_backend_functional final : public at::meta::structured_replication_pad3d {
17244 void set_output_strided(
17245 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17246 TensorOptions options, DimnameList names
17247 ) override {
17248 auto current_device = guard_.current_device();
17249 if (C10_UNLIKELY(current_device.has_value())) {
17250 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17251 "structured kernels don't support multi-device outputs");
17252 } else {
17253 guard_.reset_device(options.device());
17254 }
17255 outputs_[output_idx] = create_out(sizes, strides, options);
17256 if (!names.empty()) {
17257 namedinference::propagate_names(*outputs_[output_idx], names);
17258 }
17259 // super must happen after, so that downstream can use maybe_get_output
17260 // to retrieve the output
17261 }
17262 void set_output_raw_strided(
17263 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17264 TensorOptions options, DimnameList names
17265 ) override {
17266 auto current_device = guard_.current_device();
17267 if (C10_UNLIKELY(current_device.has_value())) {
17268 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17269 "structured kernels don't support multi-device outputs");
17270 } else {
17271 guard_.reset_device(options.device());
17272 }
17273 outputs_[output_idx] = create_out(sizes, strides, options);
17274 if (!names.empty()) {
17275 namedinference::propagate_names(*outputs_[output_idx], names);
17276 }
17277 // super must happen after, so that downstream can use maybe_get_output
17278 // to retrieve the output
17279 }
17280 const Tensor& maybe_get_output(int64_t output_idx) override {
17281 return *outputs_[output_idx];
17282 }
17283 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17284 c10::OptionalDeviceGuard guard_;
17285};
17286at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_replication_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
17287structured_replication_pad3d_default_backend_functional op;
17288op.meta(self, padding);
17289at::replication_pad3d_outf(self, padding, *op.outputs_[0]);
17290return std::move(op.outputs_[0]).take();
17291}
17292struct structured_upsample_linear1d_default_backend_functional final : public at::meta::structured_upsample_linear1d {
17293 void set_output_strided(
17294 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17295 TensorOptions options, DimnameList names
17296 ) override {
17297 auto current_device = guard_.current_device();
17298 if (C10_UNLIKELY(current_device.has_value())) {
17299 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17300 "structured kernels don't support multi-device outputs");
17301 } else {
17302 guard_.reset_device(options.device());
17303 }
17304 outputs_[output_idx] = create_out(sizes, strides, options);
17305 if (!names.empty()) {
17306 namedinference::propagate_names(*outputs_[output_idx], names);
17307 }
17308 // super must happen after, so that downstream can use maybe_get_output
17309 // to retrieve the output
17310 }
17311 void set_output_raw_strided(
17312 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17313 TensorOptions options, DimnameList names
17314 ) override {
17315 auto current_device = guard_.current_device();
17316 if (C10_UNLIKELY(current_device.has_value())) {
17317 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17318 "structured kernels don't support multi-device outputs");
17319 } else {
17320 guard_.reset_device(options.device());
17321 }
17322 outputs_[output_idx] = create_out(sizes, strides, options);
17323 if (!names.empty()) {
17324 namedinference::propagate_names(*outputs_[output_idx], names);
17325 }
17326 // super must happen after, so that downstream can use maybe_get_output
17327 // to retrieve the output
17328 }
17329 const Tensor& maybe_get_output(int64_t output_idx) override {
17330 return *outputs_[output_idx];
17331 }
17332 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17333 c10::OptionalDeviceGuard guard_;
17334};
17335at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
17336structured_upsample_linear1d_default_backend_functional op;
17337op.meta(self, output_size, align_corners, scales);
17338at::upsample_linear1d_outf(self, output_size, align_corners, scales, *op.outputs_[0]);
17339return std::move(op.outputs_[0]).take();
17340}
17341struct structured_upsample_linear1d_backward_default_backend_functional final : public at::meta::structured_upsample_linear1d_backward {
17342 void set_output_strided(
17343 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17344 TensorOptions options, DimnameList names
17345 ) override {
17346 auto current_device = guard_.current_device();
17347 if (C10_UNLIKELY(current_device.has_value())) {
17348 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17349 "structured kernels don't support multi-device outputs");
17350 } else {
17351 guard_.reset_device(options.device());
17352 }
17353 outputs_[output_idx] = create_out(sizes, strides, options);
17354 if (!names.empty()) {
17355 namedinference::propagate_names(*outputs_[output_idx], names);
17356 }
17357 // super must happen after, so that downstream can use maybe_get_output
17358 // to retrieve the output
17359 }
17360 void set_output_raw_strided(
17361 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17362 TensorOptions options, DimnameList names
17363 ) override {
17364 auto current_device = guard_.current_device();
17365 if (C10_UNLIKELY(current_device.has_value())) {
17366 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17367 "structured kernels don't support multi-device outputs");
17368 } else {
17369 guard_.reset_device(options.device());
17370 }
17371 outputs_[output_idx] = create_out(sizes, strides, options);
17372 if (!names.empty()) {
17373 namedinference::propagate_names(*outputs_[output_idx], names);
17374 }
17375 // super must happen after, so that downstream can use maybe_get_output
17376 // to retrieve the output
17377 }
17378 const Tensor& maybe_get_output(int64_t output_idx) override {
17379 return *outputs_[output_idx];
17380 }
17381 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17382 c10::OptionalDeviceGuard guard_;
17383};
17384at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
17385structured_upsample_linear1d_backward_default_backend_functional op;
17386op.meta(grad_output, output_size, input_size, align_corners, scales);
17387at::upsample_linear1d_backward_outf(grad_output, output_size, input_size, align_corners, scales, *op.outputs_[0]);
17388return std::move(op.outputs_[0]).take();
17389}
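// NOTE: [Editorial comment] The *_backward wrappers in this block take
// input_size explicitly because the gradient's shape cannot be recovered from
// grad_output alone; meta() uses it to size the single grad_input tensor that
// set_output_strided() allocates above.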
17390struct structured_upsample_bilinear2d_default_backend_functional final : public at::meta::structured_upsample_bilinear2d {
17391 void set_output_strided(
17392 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17393 TensorOptions options, DimnameList names
17394 ) override {
17395 auto current_device = guard_.current_device();
17396 if (C10_UNLIKELY(current_device.has_value())) {
17397 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17398 "structured kernels don't support multi-device outputs");
17399 } else {
17400 guard_.reset_device(options.device());
17401 }
17402 outputs_[output_idx] = create_out(sizes, strides, options);
17403 if (!names.empty()) {
17404 namedinference::propagate_names(*outputs_[output_idx], names);
17405 }
17406 // super must happen after, so that downstream can use maybe_get_output
17407 // to retrieve the output
17408 }
17409 void set_output_raw_strided(
17410 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17411 TensorOptions options, DimnameList names
17412 ) override {
17413 auto current_device = guard_.current_device();
17414 if (C10_UNLIKELY(current_device.has_value())) {
17415 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17416 "structured kernels don't support multi-device outputs");
17417 } else {
17418 guard_.reset_device(options.device());
17419 }
17420 outputs_[output_idx] = create_out(sizes, strides, options);
17421 if (!names.empty()) {
17422 namedinference::propagate_names(*outputs_[output_idx], names);
17423 }
17424 // super must happen after, so that downstream can use maybe_get_output
17425 // to retrieve the output
17426 }
17427 const Tensor& maybe_get_output(int64_t output_idx) override {
17428 return *outputs_[output_idx];
17429 }
17430 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17431 c10::OptionalDeviceGuard guard_;
17432};
17433at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17434structured_upsample_bilinear2d_default_backend_functional op;
17435op.meta(self, output_size, align_corners, scales_h, scales_w);
17436at::upsample_bilinear2d_outf(self, output_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
17437return std::move(op.outputs_[0]).take();
17438}
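// NOTE: [Editorial sketch, not emitted by torchgen] The c10::optional<double>
// scale arguments are hints; when they are c10::nullopt the kernel derives
// the scale from output_size and the input's spatial size. For a 4-D NCHW
// input, assuming <ATen/ATen.h>:
//
//   at::Tensor img = at::rand({1, 3, 4, 4});
//   at::Tensor up  = at::upsample_bilinear2d(
//       img, /*output_size=*/{8, 8}, /*align_corners=*/false,
//       /*scales_h=*/c10::nullopt, /*scales_w=*/c10::nullopt);
//   // up has shape {1, 3, 8, 8}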
17439struct structured_upsample_bilinear2d_backward_default_backend_functional final : public at::meta::structured_upsample_bilinear2d_backward {
17440 void set_output_strided(
17441 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17442 TensorOptions options, DimnameList names
17443 ) override {
17444 auto current_device = guard_.current_device();
17445 if (C10_UNLIKELY(current_device.has_value())) {
17446 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17447 "structured kernels don't support multi-device outputs");
17448 } else {
17449 guard_.reset_device(options.device());
17450 }
17451 outputs_[output_idx] = create_out(sizes, strides, options);
17452 if (!names.empty()) {
17453 namedinference::propagate_names(*outputs_[output_idx], names);
17454 }
17455 // super must happen after, so that downstream can use maybe_get_output
17456 // to retrieve the output
17457 }
17458 void set_output_raw_strided(
17459 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17460 TensorOptions options, DimnameList names
17461 ) override {
17462 auto current_device = guard_.current_device();
17463 if (C10_UNLIKELY(current_device.has_value())) {
17464 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17465 "structured kernels don't support multi-device outputs");
17466 } else {
17467 guard_.reset_device(options.device());
17468 }
17469 outputs_[output_idx] = create_out(sizes, strides, options);
17470 if (!names.empty()) {
17471 namedinference::propagate_names(*outputs_[output_idx], names);
17472 }
17473 // super must happen after, so that downstream can use maybe_get_output
17474 // to retrieve the output
17475 }
17476 const Tensor& maybe_get_output(int64_t output_idx) override {
17477 return *outputs_[output_idx];
17478 }
17479 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17480 c10::OptionalDeviceGuard guard_;
17481};
17482at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17483structured_upsample_bilinear2d_backward_default_backend_functional op;
17484op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
17485at::upsample_bilinear2d_backward_outf(grad_output, output_size, input_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
17486return std::move(op.outputs_[0]).take();
17487}
17488struct structured__upsample_bilinear2d_aa_default_backend_functional final : public at::meta::structured__upsample_bilinear2d_aa {
17489 void set_output_strided(
17490 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17491 TensorOptions options, DimnameList names
17492 ) override {
17493 auto current_device = guard_.current_device();
17494 if (C10_UNLIKELY(current_device.has_value())) {
17495 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17496 "structured kernels don't support multi-device outputs");
17497 } else {
17498 guard_.reset_device(options.device());
17499 }
17500 outputs_[output_idx] = create_out(sizes, strides, options);
17501 if (!names.empty()) {
17502 namedinference::propagate_names(*outputs_[output_idx], names);
17503 }
17504 // super must happen after, so that downstream can use maybe_get_output
17505 // to retrieve the output
17506 }
17507 void set_output_raw_strided(
17508 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17509 TensorOptions options, DimnameList names
17510 ) override {
17511 auto current_device = guard_.current_device();
17512 if (C10_UNLIKELY(current_device.has_value())) {
17513 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17514 "structured kernels don't support multi-device outputs");
17515 } else {
17516 guard_.reset_device(options.device());
17517 }
17518 outputs_[output_idx] = create_out(sizes, strides, options);
17519 if (!names.empty()) {
17520 namedinference::propagate_names(*outputs_[output_idx], names);
17521 }
17522 // super must happen after, so that downstream can use maybe_get_output
17523 // to retrieve the output
17524 }
17525 const Tensor& maybe_get_output(int64_t output_idx) override {
17526 return *outputs_[output_idx];
17527 }
17528 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17529 c10::OptionalDeviceGuard guard_;
17530};
17531at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17532structured__upsample_bilinear2d_aa_default_backend_functional op;
17533op.meta(self, output_size, align_corners, scales_h, scales_w);
17534at::_upsample_bilinear2d_aa_outf(self, output_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
17535return std::move(op.outputs_[0]).take();
17536}
17537struct structured__upsample_bilinear2d_aa_backward_default_backend_functional final : public at::meta::structured__upsample_bilinear2d_aa_backward {
17538 void set_output_strided(
17539 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17540 TensorOptions options, DimnameList names
17541 ) override {
17542 auto current_device = guard_.current_device();
17543 if (C10_UNLIKELY(current_device.has_value())) {
17544 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17545 "structured kernels don't support multi-device outputs");
17546 } else {
17547 guard_.reset_device(options.device());
17548 }
17549 outputs_[output_idx] = create_out(sizes, strides, options);
17550 if (!names.empty()) {
17551 namedinference::propagate_names(*outputs_[output_idx], names);
17552 }
17553 // super must happen after, so that downstream can use maybe_get_output
17554 // to retrieve the output
17555 }
17556 void set_output_raw_strided(
17557 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17558 TensorOptions options, DimnameList names
17559 ) override {
17560 auto current_device = guard_.current_device();
17561 if (C10_UNLIKELY(current_device.has_value())) {
17562 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17563 "structured kernels don't support multi-device outputs");
17564 } else {
17565 guard_.reset_device(options.device());
17566 }
17567 outputs_[output_idx] = create_out(sizes, strides, options);
17568 if (!names.empty()) {
17569 namedinference::propagate_names(*outputs_[output_idx], names);
17570 }
17571 // super must happen after, so that downstream can use maybe_get_output
17572 // to retrieve the output
17573 }
17574 const Tensor& maybe_get_output(int64_t output_idx) override {
17575 return *outputs_[output_idx];
17576 }
17577 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17578 c10::OptionalDeviceGuard guard_;
17579};
17580at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17581structured__upsample_bilinear2d_aa_backward_default_backend_functional op;
17582op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
17583at::_upsample_bilinear2d_aa_backward_outf(grad_output, output_size, input_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
17584return std::move(op.outputs_[0]).take();
17585}
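// NOTE: [Editorial comment, best-effort description] The _aa-suffixed ops are
// the anti-aliased resampling variants (the path taken when interpolation is
// requested with antialiasing). Their wrappers are structurally identical to
// the plain bilinear/bicubic ones above; only the out= kernel they forward to
// differs.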
17586struct structured_upsample_bicubic2d_default_backend_functional final : public at::meta::structured_upsample_bicubic2d {
17587 void set_output_strided(
17588 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17589 TensorOptions options, DimnameList names
17590 ) override {
17591 auto current_device = guard_.current_device();
17592 if (C10_UNLIKELY(current_device.has_value())) {
17593 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17594 "structured kernels don't support multi-device outputs");
17595 } else {
17596 guard_.reset_device(options.device());
17597 }
17598 outputs_[output_idx] = create_out(sizes, strides, options);
17599 if (!names.empty()) {
17600 namedinference::propagate_names(*outputs_[output_idx], names);
17601 }
17602 // super must happen after, so that downstream can use maybe_get_output
17603 // to retrieve the output
17604 }
17605 void set_output_raw_strided(
17606 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17607 TensorOptions options, DimnameList names
17608 ) override {
17609 auto current_device = guard_.current_device();
17610 if (C10_UNLIKELY(current_device.has_value())) {
17611 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17612 "structured kernels don't support multi-device outputs");
17613 } else {
17614 guard_.reset_device(options.device());
17615 }
17616 outputs_[output_idx] = create_out(sizes, strides, options);
17617 if (!names.empty()) {
17618 namedinference::propagate_names(*outputs_[output_idx], names);
17619 }
17620 // super must happen after, so that downstream can use maybe_get_output
17621 // to retrieve the output
17622 }
17623 const Tensor& maybe_get_output(int64_t output_idx) override {
17624 return *outputs_[output_idx];
17625 }
17626 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17627 c10::OptionalDeviceGuard guard_;
17628};
17629at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17630structured_upsample_bicubic2d_default_backend_functional op;
17631op.meta(self, output_size, align_corners, scales_h, scales_w);
17632at::upsample_bicubic2d_outf(self, output_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
17633return std::move(op.outputs_[0]).take();
17634}
17635struct structured_upsample_bicubic2d_backward_default_backend_functional final : public at::meta::structured_upsample_bicubic2d_backward {
17636 void set_output_strided(
17637 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17638 TensorOptions options, DimnameList names
17639 ) override {
17640 auto current_device = guard_.current_device();
17641 if (C10_UNLIKELY(current_device.has_value())) {
17642 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17643 "structured kernels don't support multi-device outputs");
17644 } else {
17645 guard_.reset_device(options.device());
17646 }
17647 outputs_[output_idx] = create_out(sizes, strides, options);
17648 if (!names.empty()) {
17649 namedinference::propagate_names(*outputs_[output_idx], names);
17650 }
17651 // super must happen after, so that downstream can use maybe_get_output
17652 // to retrieve the output
17653 }
17654 void set_output_raw_strided(
17655 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17656 TensorOptions options, DimnameList names
17657 ) override {
17658 auto current_device = guard_.current_device();
17659 if (C10_UNLIKELY(current_device.has_value())) {
17660 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17661 "structured kernels don't support multi-device outputs");
17662 } else {
17663 guard_.reset_device(options.device());
17664 }
17665 outputs_[output_idx] = create_out(sizes, strides, options);
17666 if (!names.empty()) {
17667 namedinference::propagate_names(*outputs_[output_idx], names);
17668 }
17669 // super must happen after, so that downstream can use maybe_get_output
17670 // to retrieve the output
17671 }
17672 const Tensor& maybe_get_output(int64_t output_idx) override {
17673 return *outputs_[output_idx];
17674 }
17675 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17676 c10::OptionalDeviceGuard guard_;
17677};
17678at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17679structured_upsample_bicubic2d_backward_default_backend_functional op;
17680op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
17681at::upsample_bicubic2d_backward_outf(grad_output, output_size, input_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
17682return std::move(op.outputs_[0]).take();
17683}
17684struct structured__upsample_bicubic2d_aa_default_backend_functional final : public at::meta::structured__upsample_bicubic2d_aa {
17685 void set_output_strided(
17686 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17687 TensorOptions options, DimnameList names
17688 ) override {
17689 auto current_device = guard_.current_device();
17690 if (C10_UNLIKELY(current_device.has_value())) {
17691 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17692 "structured kernels don't support multi-device outputs");
17693 } else {
17694 guard_.reset_device(options.device());
17695 }
17696 outputs_[output_idx] = create_out(sizes, strides, options);
17697 if (!names.empty()) {
17698 namedinference::propagate_names(*outputs_[output_idx], names);
17699 }
17700 // super must happen after, so that downstream can use maybe_get_output
17701 // to retrieve the output
17702 }
17703 void set_output_raw_strided(
17704 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17705 TensorOptions options, DimnameList names
17706 ) override {
17707 auto current_device = guard_.current_device();
17708 if (C10_UNLIKELY(current_device.has_value())) {
17709 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17710 "structured kernels don't support multi-device outputs");
17711 } else {
17712 guard_.reset_device(options.device());
17713 }
17714 outputs_[output_idx] = create_out(sizes, strides, options);
17715 if (!names.empty()) {
17716 namedinference::propagate_names(*outputs_[output_idx], names);
17717 }
17718 // super must happen after, so that downstream can use maybe_get_output
17719 // to retrieve the output
17720 }
17721 const Tensor& maybe_get_output(int64_t output_idx) override {
17722 return *outputs_[output_idx];
17723 }
17724 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17725 c10::OptionalDeviceGuard guard_;
17726};
17727at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17728structured__upsample_bicubic2d_aa_default_backend_functional op;
17729op.meta(self, output_size, align_corners, scales_h, scales_w);
17730at::_upsample_bicubic2d_aa_outf(self, output_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
17731return std::move(op.outputs_[0]).take();
17732}
17733struct structured__upsample_bicubic2d_aa_backward_default_backend_functional final : public at::meta::structured__upsample_bicubic2d_aa_backward {
17734 void set_output_strided(
17735 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17736 TensorOptions options, DimnameList names
17737 ) override {
17738 auto current_device = guard_.current_device();
17739 if (C10_UNLIKELY(current_device.has_value())) {
17740 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17741 "structured kernels don't support multi-device outputs");
17742 } else {
17743 guard_.reset_device(options.device());
17744 }
17745 outputs_[output_idx] = create_out(sizes, strides, options);
17746 if (!names.empty()) {
17747 namedinference::propagate_names(*outputs_[output_idx], names);
17748 }
17749 // super must happen after, so that downstream can use maybe_get_output
17750 // to retrieve the output
17751 }
17752 void set_output_raw_strided(
17753 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17754 TensorOptions options, DimnameList names
17755 ) override {
17756 auto current_device = guard_.current_device();
17757 if (C10_UNLIKELY(current_device.has_value())) {
17758 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17759 "structured kernels don't support multi-device outputs");
17760 } else {
17761 guard_.reset_device(options.device());
17762 }
17763 outputs_[output_idx] = create_out(sizes, strides, options);
17764 if (!names.empty()) {
17765 namedinference::propagate_names(*outputs_[output_idx], names);
17766 }
17767 // super must happen after, so that downstream can use maybe_get_output
17768 // to retrieve the output
17769 }
17770 const Tensor& maybe_get_output(int64_t output_idx) override {
17771 return *outputs_[output_idx];
17772 }
17773 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17774 c10::OptionalDeviceGuard guard_;
17775};
17776at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17777structured__upsample_bicubic2d_aa_backward_default_backend_functional op;
17778op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
17779at::_upsample_bicubic2d_aa_backward_outf(grad_output, output_size, input_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
17780return std::move(op.outputs_[0]).take();
17781}
17782struct structured_upsample_trilinear3d_default_backend_functional final : public at::meta::structured_upsample_trilinear3d {
17783 void set_output_strided(
17784 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17785 TensorOptions options, DimnameList names
17786 ) override {
17787 auto current_device = guard_.current_device();
17788 if (C10_UNLIKELY(current_device.has_value())) {
17789 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17790 "structured kernels don't support multi-device outputs");
17791 } else {
17792 guard_.reset_device(options.device());
17793 }
17794 outputs_[output_idx] = create_out(sizes, strides, options);
17795 if (!names.empty()) {
17796 namedinference::propagate_names(*outputs_[output_idx], names);
17797 }
17798 // super must happen after, so that downstream can use maybe_get_output
17799 // to retrieve the output
17800 }
17801 void set_output_raw_strided(
17802 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17803 TensorOptions options, DimnameList names
17804 ) override {
17805 auto current_device = guard_.current_device();
17806 if (C10_UNLIKELY(current_device.has_value())) {
17807 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
17808 "structured kernels don't support multi-device outputs");
17809 } else {
17810 guard_.reset_device(options.device());
17811 }
17812 outputs_[output_idx] = create_out(sizes, strides, options);
17813 if (!names.empty()) {
17814 namedinference::propagate_names(*outputs_[output_idx], names);
17815 }
17816 // super must happen after, so that downstream can use maybe_get_output
17817 // to retrieve the output
17818 }
17819 const Tensor& maybe_get_output(int64_t output_idx) override {
17820 return *outputs_[output_idx];
17821 }
17822 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17823 c10::OptionalDeviceGuard guard_;
17824};
17825at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
17826structured_upsample_trilinear3d_default_backend_functional op;
17827op.meta(self, output_size, align_corners, scales_d, scales_h, scales_w);
17828at::upsample_trilinear3d_outf(self, output_size, align_corners, scales_d, scales_h, scales_w, *op.outputs_[0]);
17829return std::move(op.outputs_[0]).take();
17830}
17831struct structured_upsample_trilinear3d_backward_default_backend_functional final : public at::meta::structured_upsample_trilinear3d_backward {
17832 void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        auto current_device = guard_.current_device();
        if (C10_UNLIKELY(current_device.has_value())) {
          TORCH_INTERNAL_ASSERT(*current_device == options.device(),
            "structured kernels don't support multi-device outputs");
        } else {
          guard_.reset_device(options.device());
        }
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        auto current_device = guard_.current_device();
        if (C10_UNLIKELY(current_device.has_value())) {
          TORCH_INTERNAL_ASSERT(*current_device == options.device(),
            "structured kernels don't support multi-device outputs");
        } else {
          guard_.reset_device(options.device());
        }
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
    c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
structured_upsample_trilinear3d_backward_default_backend_functional op;
op.meta(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
at::upsample_trilinear3d_backward_outf(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_upsample_nearest1d_default_backend_functional final : public at::meta::structured_upsample_nearest1d {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        auto current_device = guard_.current_device();
        if (C10_UNLIKELY(current_device.has_value())) {
          TORCH_INTERNAL_ASSERT(*current_device == options.device(),
            "structured kernels don't support multi-device outputs");
        } else {
          guard_.reset_device(options.device());
        }
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        auto current_device = guard_.current_device();
        if (C10_UNLIKELY(current_device.has_value())) {
          TORCH_INTERNAL_ASSERT(*current_device == options.device(),
            "structured kernels don't support multi-device outputs");
        } else {
          guard_.reset_device(options.device());
        }
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
    c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
structured_upsample_nearest1d_default_backend_functional op;
op.meta(self, output_size, scales);
at::upsample_nearest1d_outf(self, output_size, scales, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured__upsample_nearest_exact1d_default_backend_functional final : public at::meta::structured__upsample_nearest_exact1d {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        auto current_device = guard_.current_device();
        if (C10_UNLIKELY(current_device.has_value())) {
          TORCH_INTERNAL_ASSERT(*current_device == options.device(),
            "structured kernels don't support multi-device outputs");
        } else {
          guard_.reset_device(options.device());
        }
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        auto current_device = guard_.current_device();
        if (C10_UNLIKELY(current_device.has_value())) {
          TORCH_INTERNAL_ASSERT(*current_device == options.device(),
            "structured kernels don't support multi-device outputs");
        } else {
          guard_.reset_device(options.device());
        }
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
    c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
structured__upsample_nearest_exact1d_default_backend_functional op;
op.meta(self, output_size, scales);
at::_upsample_nearest_exact1d_outf(self, output_size, scales, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_upsample_nearest1d_backward_default_backend_functional final : public at::meta::structured_upsample_nearest1d_backward {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        auto current_device = guard_.current_device();
        if (C10_UNLIKELY(current_device.has_value())) {
          TORCH_INTERNAL_ASSERT(*current_device == options.device(),
            "structured kernels don't support multi-device outputs");
        } else {
          guard_.reset_device(options.device());
        }
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        auto current_device = guard_.current_device();
        if (C10_UNLIKELY(current_device.has_value())) {
          TORCH_INTERNAL_ASSERT(*current_device == options.device(),
            "structured kernels don't support multi-device outputs");
        } else {
          guard_.reset_device(options.device());
        }
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
    c10::OptionalDeviceGuard guard_;
};
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
structured_upsample_nearest1d_backward_default_backend_functional op;
op.meta(grad_output, output_size, input_size, scales);
at::upsample_nearest1d_backward_outf(grad_output, output_size, input_size, scales, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
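// [Editorial note] The structs and wrappers in this file all follow the same
// generated pattern: a `structured_<op>_default_backend_functional` struct owns
// its outputs (allocated with create_out() inside set_output_strided /
// set_output_raw_strided), propagates dimnames when present, and holds an
// OptionalDeviceGuard so the output is created on the device requested by the
// kernel's TensorOptions. The functional wrapper then runs meta() to shape-check
// and allocate the output, forwards to the corresponding *_outf overload, and
// releases the freshly created tensor with std::move(...).take(). A rough,
// illustrative call sequence (hypothetical caller, not part of this file):
//
//   at::Tensor y = wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d(
//       x, /*output_size=*/{64}, /*scales=*/c10::nullopt);
//   // behaves roughly like:
//   //   auto out = at::empty(...); at::upsample_nearest1d_outf(x, {64}, c10::nullopt, out);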
18027struct structured__upsample_nearest_exact1d_backward_default_backend_functional final : public at::meta::structured__upsample_nearest_exact1d_backward {
18028 void set_output_strided(
18029 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18030 TensorOptions options, DimnameList names
18031 ) override {
18032 auto current_device = guard_.current_device();
18033 if (C10_UNLIKELY(current_device.has_value())) {
18034 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18035 "structured kernels don't support multi-device outputs");
18036 } else {
18037 guard_.reset_device(options.device());
18038 }
18039 outputs_[output_idx] = create_out(sizes, strides, options);
18040 if (!names.empty()) {
18041 namedinference::propagate_names(*outputs_[output_idx], names);
18042 }
18043 // super must happen after, so that downstream can use maybe_get_output
18044 // to retrieve the output
18045 }
18046 void set_output_raw_strided(
18047 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18048 TensorOptions options, DimnameList names
18049 ) override {
18050 auto current_device = guard_.current_device();
18051 if (C10_UNLIKELY(current_device.has_value())) {
18052 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18053 "structured kernels don't support multi-device outputs");
18054 } else {
18055 guard_.reset_device(options.device());
18056 }
18057 outputs_[output_idx] = create_out(sizes, strides, options);
18058 if (!names.empty()) {
18059 namedinference::propagate_names(*outputs_[output_idx], names);
18060 }
18061 // super must happen after, so that downstream can use maybe_get_output
18062 // to retrieve the output
18063 }
18064 const Tensor& maybe_get_output(int64_t output_idx) override {
18065 return *outputs_[output_idx];
18066 }
18067 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18068 c10::OptionalDeviceGuard guard_;
18069};
18070at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
18071structured__upsample_nearest_exact1d_backward_default_backend_functional op;
18072op.meta(grad_output, output_size, input_size, scales);
18073at::_upsample_nearest_exact1d_backward_outf(grad_output, output_size, input_size, scales, *op.outputs_[0]);
18074return std::move(op.outputs_[0]).take();
18075}
18076struct structured_upsample_nearest2d_default_backend_functional final : public at::meta::structured_upsample_nearest2d {
18077 void set_output_strided(
18078 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18079 TensorOptions options, DimnameList names
18080 ) override {
18081 auto current_device = guard_.current_device();
18082 if (C10_UNLIKELY(current_device.has_value())) {
18083 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18084 "structured kernels don't support multi-device outputs");
18085 } else {
18086 guard_.reset_device(options.device());
18087 }
18088 outputs_[output_idx] = create_out(sizes, strides, options);
18089 if (!names.empty()) {
18090 namedinference::propagate_names(*outputs_[output_idx], names);
18091 }
18092 // super must happen after, so that downstream can use maybe_get_output
18093 // to retrieve the output
18094 }
18095 void set_output_raw_strided(
18096 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18097 TensorOptions options, DimnameList names
18098 ) override {
18099 auto current_device = guard_.current_device();
18100 if (C10_UNLIKELY(current_device.has_value())) {
18101 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18102 "structured kernels don't support multi-device outputs");
18103 } else {
18104 guard_.reset_device(options.device());
18105 }
18106 outputs_[output_idx] = create_out(sizes, strides, options);
18107 if (!names.empty()) {
18108 namedinference::propagate_names(*outputs_[output_idx], names);
18109 }
18110 // super must happen after, so that downstream can use maybe_get_output
18111 // to retrieve the output
18112 }
18113 const Tensor& maybe_get_output(int64_t output_idx) override {
18114 return *outputs_[output_idx];
18115 }
18116 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18117 c10::OptionalDeviceGuard guard_;
18118};
18119at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
18120structured_upsample_nearest2d_default_backend_functional op;
18121op.meta(self, output_size, scales_h, scales_w);
18122at::upsample_nearest2d_outf(self, output_size, scales_h, scales_w, *op.outputs_[0]);
18123return std::move(op.outputs_[0]).take();
18124}
18125struct structured__upsample_nearest_exact2d_default_backend_functional final : public at::meta::structured__upsample_nearest_exact2d {
18126 void set_output_strided(
18127 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18128 TensorOptions options, DimnameList names
18129 ) override {
18130 auto current_device = guard_.current_device();
18131 if (C10_UNLIKELY(current_device.has_value())) {
18132 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18133 "structured kernels don't support multi-device outputs");
18134 } else {
18135 guard_.reset_device(options.device());
18136 }
18137 outputs_[output_idx] = create_out(sizes, strides, options);
18138 if (!names.empty()) {
18139 namedinference::propagate_names(*outputs_[output_idx], names);
18140 }
18141 // super must happen after, so that downstream can use maybe_get_output
18142 // to retrieve the output
18143 }
18144 void set_output_raw_strided(
18145 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18146 TensorOptions options, DimnameList names
18147 ) override {
18148 auto current_device = guard_.current_device();
18149 if (C10_UNLIKELY(current_device.has_value())) {
18150 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18151 "structured kernels don't support multi-device outputs");
18152 } else {
18153 guard_.reset_device(options.device());
18154 }
18155 outputs_[output_idx] = create_out(sizes, strides, options);
18156 if (!names.empty()) {
18157 namedinference::propagate_names(*outputs_[output_idx], names);
18158 }
18159 // super must happen after, so that downstream can use maybe_get_output
18160 // to retrieve the output
18161 }
18162 const Tensor& maybe_get_output(int64_t output_idx) override {
18163 return *outputs_[output_idx];
18164 }
18165 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18166 c10::OptionalDeviceGuard guard_;
18167};
18168at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
18169structured__upsample_nearest_exact2d_default_backend_functional op;
18170op.meta(self, output_size, scales_h, scales_w);
18171at::_upsample_nearest_exact2d_outf(self, output_size, scales_h, scales_w, *op.outputs_[0]);
18172return std::move(op.outputs_[0]).take();
18173}
18174struct structured_upsample_nearest2d_backward_default_backend_functional final : public at::meta::structured_upsample_nearest2d_backward {
18175 void set_output_strided(
18176 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18177 TensorOptions options, DimnameList names
18178 ) override {
18179 auto current_device = guard_.current_device();
18180 if (C10_UNLIKELY(current_device.has_value())) {
18181 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18182 "structured kernels don't support multi-device outputs");
18183 } else {
18184 guard_.reset_device(options.device());
18185 }
18186 outputs_[output_idx] = create_out(sizes, strides, options);
18187 if (!names.empty()) {
18188 namedinference::propagate_names(*outputs_[output_idx], names);
18189 }
18190 // super must happen after, so that downstream can use maybe_get_output
18191 // to retrieve the output
18192 }
18193 void set_output_raw_strided(
18194 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18195 TensorOptions options, DimnameList names
18196 ) override {
18197 auto current_device = guard_.current_device();
18198 if (C10_UNLIKELY(current_device.has_value())) {
18199 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18200 "structured kernels don't support multi-device outputs");
18201 } else {
18202 guard_.reset_device(options.device());
18203 }
18204 outputs_[output_idx] = create_out(sizes, strides, options);
18205 if (!names.empty()) {
18206 namedinference::propagate_names(*outputs_[output_idx], names);
18207 }
18208 // super must happen after, so that downstream can use maybe_get_output
18209 // to retrieve the output
18210 }
18211 const Tensor& maybe_get_output(int64_t output_idx) override {
18212 return *outputs_[output_idx];
18213 }
18214 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18215 c10::OptionalDeviceGuard guard_;
18216};
18217at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
18218structured_upsample_nearest2d_backward_default_backend_functional op;
18219op.meta(grad_output, output_size, input_size, scales_h, scales_w);
18220at::upsample_nearest2d_backward_outf(grad_output, output_size, input_size, scales_h, scales_w, *op.outputs_[0]);
18221return std::move(op.outputs_[0]).take();
18222}
18223struct structured__upsample_nearest_exact2d_backward_default_backend_functional final : public at::meta::structured__upsample_nearest_exact2d_backward {
18224 void set_output_strided(
18225 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18226 TensorOptions options, DimnameList names
18227 ) override {
18228 auto current_device = guard_.current_device();
18229 if (C10_UNLIKELY(current_device.has_value())) {
18230 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18231 "structured kernels don't support multi-device outputs");
18232 } else {
18233 guard_.reset_device(options.device());
18234 }
18235 outputs_[output_idx] = create_out(sizes, strides, options);
18236 if (!names.empty()) {
18237 namedinference::propagate_names(*outputs_[output_idx], names);
18238 }
18239 // super must happen after, so that downstream can use maybe_get_output
18240 // to retrieve the output
18241 }
18242 void set_output_raw_strided(
18243 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18244 TensorOptions options, DimnameList names
18245 ) override {
18246 auto current_device = guard_.current_device();
18247 if (C10_UNLIKELY(current_device.has_value())) {
18248 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18249 "structured kernels don't support multi-device outputs");
18250 } else {
18251 guard_.reset_device(options.device());
18252 }
18253 outputs_[output_idx] = create_out(sizes, strides, options);
18254 if (!names.empty()) {
18255 namedinference::propagate_names(*outputs_[output_idx], names);
18256 }
18257 // super must happen after, so that downstream can use maybe_get_output
18258 // to retrieve the output
18259 }
18260 const Tensor& maybe_get_output(int64_t output_idx) override {
18261 return *outputs_[output_idx];
18262 }
18263 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18264 c10::OptionalDeviceGuard guard_;
18265};
18266at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
18267structured__upsample_nearest_exact2d_backward_default_backend_functional op;
18268op.meta(grad_output, output_size, input_size, scales_h, scales_w);
18269at::_upsample_nearest_exact2d_backward_outf(grad_output, output_size, input_size, scales_h, scales_w, *op.outputs_[0]);
18270return std::move(op.outputs_[0]).take();
18271}
18272struct structured_upsample_nearest3d_default_backend_functional final : public at::meta::structured_upsample_nearest3d {
18273 void set_output_strided(
18274 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18275 TensorOptions options, DimnameList names
18276 ) override {
18277 auto current_device = guard_.current_device();
18278 if (C10_UNLIKELY(current_device.has_value())) {
18279 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18280 "structured kernels don't support multi-device outputs");
18281 } else {
18282 guard_.reset_device(options.device());
18283 }
18284 outputs_[output_idx] = create_out(sizes, strides, options);
18285 if (!names.empty()) {
18286 namedinference::propagate_names(*outputs_[output_idx], names);
18287 }
18288 // super must happen after, so that downstream can use maybe_get_output
18289 // to retrieve the output
18290 }
18291 void set_output_raw_strided(
18292 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18293 TensorOptions options, DimnameList names
18294 ) override {
18295 auto current_device = guard_.current_device();
18296 if (C10_UNLIKELY(current_device.has_value())) {
18297 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18298 "structured kernels don't support multi-device outputs");
18299 } else {
18300 guard_.reset_device(options.device());
18301 }
18302 outputs_[output_idx] = create_out(sizes, strides, options);
18303 if (!names.empty()) {
18304 namedinference::propagate_names(*outputs_[output_idx], names);
18305 }
18306 // super must happen after, so that downstream can use maybe_get_output
18307 // to retrieve the output
18308 }
18309 const Tensor& maybe_get_output(int64_t output_idx) override {
18310 return *outputs_[output_idx];
18311 }
18312 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18313 c10::OptionalDeviceGuard guard_;
18314};
18315at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
18316structured_upsample_nearest3d_default_backend_functional op;
18317op.meta(self, output_size, scales_d, scales_h, scales_w);
18318at::upsample_nearest3d_outf(self, output_size, scales_d, scales_h, scales_w, *op.outputs_[0]);
18319return std::move(op.outputs_[0]).take();
18320}
18321struct structured__upsample_nearest_exact3d_default_backend_functional final : public at::meta::structured__upsample_nearest_exact3d {
18322 void set_output_strided(
18323 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18324 TensorOptions options, DimnameList names
18325 ) override {
18326 auto current_device = guard_.current_device();
18327 if (C10_UNLIKELY(current_device.has_value())) {
18328 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18329 "structured kernels don't support multi-device outputs");
18330 } else {
18331 guard_.reset_device(options.device());
18332 }
18333 outputs_[output_idx] = create_out(sizes, strides, options);
18334 if (!names.empty()) {
18335 namedinference::propagate_names(*outputs_[output_idx], names);
18336 }
18337 // super must happen after, so that downstream can use maybe_get_output
18338 // to retrieve the output
18339 }
18340 void set_output_raw_strided(
18341 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18342 TensorOptions options, DimnameList names
18343 ) override {
18344 auto current_device = guard_.current_device();
18345 if (C10_UNLIKELY(current_device.has_value())) {
18346 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18347 "structured kernels don't support multi-device outputs");
18348 } else {
18349 guard_.reset_device(options.device());
18350 }
18351 outputs_[output_idx] = create_out(sizes, strides, options);
18352 if (!names.empty()) {
18353 namedinference::propagate_names(*outputs_[output_idx], names);
18354 }
18355 // super must happen after, so that downstream can use maybe_get_output
18356 // to retrieve the output
18357 }
18358 const Tensor& maybe_get_output(int64_t output_idx) override {
18359 return *outputs_[output_idx];
18360 }
18361 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18362 c10::OptionalDeviceGuard guard_;
18363};
18364at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
18365structured__upsample_nearest_exact3d_default_backend_functional op;
18366op.meta(self, output_size, scales_d, scales_h, scales_w);
18367at::_upsample_nearest_exact3d_outf(self, output_size, scales_d, scales_h, scales_w, *op.outputs_[0]);
18368return std::move(op.outputs_[0]).take();
18369}
18370struct structured_upsample_nearest3d_backward_default_backend_functional final : public at::meta::structured_upsample_nearest3d_backward {
18371 void set_output_strided(
18372 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18373 TensorOptions options, DimnameList names
18374 ) override {
18375 auto current_device = guard_.current_device();
18376 if (C10_UNLIKELY(current_device.has_value())) {
18377 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18378 "structured kernels don't support multi-device outputs");
18379 } else {
18380 guard_.reset_device(options.device());
18381 }
18382 outputs_[output_idx] = create_out(sizes, strides, options);
18383 if (!names.empty()) {
18384 namedinference::propagate_names(*outputs_[output_idx], names);
18385 }
18386 // super must happen after, so that downstream can use maybe_get_output
18387 // to retrieve the output
18388 }
18389 void set_output_raw_strided(
18390 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18391 TensorOptions options, DimnameList names
18392 ) override {
18393 auto current_device = guard_.current_device();
18394 if (C10_UNLIKELY(current_device.has_value())) {
18395 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18396 "structured kernels don't support multi-device outputs");
18397 } else {
18398 guard_.reset_device(options.device());
18399 }
18400 outputs_[output_idx] = create_out(sizes, strides, options);
18401 if (!names.empty()) {
18402 namedinference::propagate_names(*outputs_[output_idx], names);
18403 }
18404 // super must happen after, so that downstream can use maybe_get_output
18405 // to retrieve the output
18406 }
18407 const Tensor& maybe_get_output(int64_t output_idx) override {
18408 return *outputs_[output_idx];
18409 }
18410 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18411 c10::OptionalDeviceGuard guard_;
18412};
18413at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
18414structured_upsample_nearest3d_backward_default_backend_functional op;
18415op.meta(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
18416at::upsample_nearest3d_backward_outf(grad_output, output_size, input_size, scales_d, scales_h, scales_w, *op.outputs_[0]);
18417return std::move(op.outputs_[0]).take();
18418}
18419struct structured__upsample_nearest_exact3d_backward_default_backend_functional final : public at::meta::structured__upsample_nearest_exact3d_backward {
18420 void set_output_strided(
18421 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18422 TensorOptions options, DimnameList names
18423 ) override {
18424 auto current_device = guard_.current_device();
18425 if (C10_UNLIKELY(current_device.has_value())) {
18426 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18427 "structured kernels don't support multi-device outputs");
18428 } else {
18429 guard_.reset_device(options.device());
18430 }
18431 outputs_[output_idx] = create_out(sizes, strides, options);
18432 if (!names.empty()) {
18433 namedinference::propagate_names(*outputs_[output_idx], names);
18434 }
18435 // super must happen after, so that downstream can use maybe_get_output
18436 // to retrieve the output
18437 }
18438 void set_output_raw_strided(
18439 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18440 TensorOptions options, DimnameList names
18441 ) override {
18442 auto current_device = guard_.current_device();
18443 if (C10_UNLIKELY(current_device.has_value())) {
18444 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18445 "structured kernels don't support multi-device outputs");
18446 } else {
18447 guard_.reset_device(options.device());
18448 }
18449 outputs_[output_idx] = create_out(sizes, strides, options);
18450 if (!names.empty()) {
18451 namedinference::propagate_names(*outputs_[output_idx], names);
18452 }
18453 // super must happen after, so that downstream can use maybe_get_output
18454 // to retrieve the output
18455 }
18456 const Tensor& maybe_get_output(int64_t output_idx) override {
18457 return *outputs_[output_idx];
18458 }
18459 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18460 c10::OptionalDeviceGuard guard_;
18461};
18462at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
18463structured__upsample_nearest_exact3d_backward_default_backend_functional op;
18464op.meta(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
18465at::_upsample_nearest_exact3d_backward_outf(grad_output, output_size, input_size, scales_d, scales_h, scales_w, *op.outputs_[0]);
18466return std::move(op.outputs_[0]).take();
18467}
18468struct structured_sigmoid_backward_default_backend_functional final : public at::meta::structured_sigmoid_backward {
18469 void set_output_strided(
18470 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18471 TensorOptions options, DimnameList names
18472 ) override {
18473 auto current_device = guard_.current_device();
18474 if (C10_UNLIKELY(current_device.has_value())) {
18475 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18476 "structured kernels don't support multi-device outputs");
18477 } else {
18478 guard_.reset_device(options.device());
18479 }
18480 outputs_[output_idx] = create_out(sizes, strides, options);
18481 if (!names.empty()) {
18482 namedinference::propagate_names(*outputs_[output_idx], names);
18483 }
18484 // super must happen after, so that downstream can use maybe_get_output
18485 // to retrieve the output
18486 at::meta::structured_sigmoid_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
18487 }
18488 void set_output_raw_strided(
18489 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18490 TensorOptions options, DimnameList names
18491 ) override {
18492 auto current_device = guard_.current_device();
18493 if (C10_UNLIKELY(current_device.has_value())) {
18494 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18495 "structured kernels don't support multi-device outputs");
18496 } else {
18497 guard_.reset_device(options.device());
18498 }
18499 outputs_[output_idx] = create_out(sizes, strides, options);
18500 if (!names.empty()) {
18501 namedinference::propagate_names(*outputs_[output_idx], names);
18502 }
18503 // super must happen after, so that downstream can use maybe_get_output
18504 // to retrieve the output
18505 at::meta::structured_sigmoid_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
18506 }
18507 const Tensor& maybe_get_output(int64_t output_idx) override {
18508 return *outputs_[output_idx];
18509 }
18510 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18511 c10::OptionalDeviceGuard guard_;
18512};
18513at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) {
18514structured_sigmoid_backward_default_backend_functional op;
18515op.meta(grad_output, output);
18516at::sigmoid_backward_outf(grad_output, output, *op.outputs_[0]);
18517return std::move(op.outputs_[0]).take();
18518}
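// [Editorial note] Unlike the upsample wrappers above, sigmoid_backward and the
// other ops here whose meta class derives from TensorIteratorBase also forward
// set_output_strided / set_output_raw_strided to
// at::meta::structured_<op>::set_output_raw_strided after allocating the output;
// that "super" call lets the TensorIteratorBase-derived meta class record the
// newly created output before the out= kernel runs. The plain-meta ops have no
// such base implementation to call, which is why the call is absent there even
// though the "super must happen after" comment is shared.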
18519struct structured_logit_backward_default_backend_functional final : public at::meta::structured_logit_backward {
18520 void set_output_strided(
18521 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18522 TensorOptions options, DimnameList names
18523 ) override {
18524 auto current_device = guard_.current_device();
18525 if (C10_UNLIKELY(current_device.has_value())) {
18526 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18527 "structured kernels don't support multi-device outputs");
18528 } else {
18529 guard_.reset_device(options.device());
18530 }
18531 outputs_[output_idx] = create_out(sizes, strides, options);
18532 if (!names.empty()) {
18533 namedinference::propagate_names(*outputs_[output_idx], names);
18534 }
18535 // super must happen after, so that downstream can use maybe_get_output
18536 // to retrieve the output
18537 at::meta::structured_logit_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
18538 }
18539 void set_output_raw_strided(
18540 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18541 TensorOptions options, DimnameList names
18542 ) override {
18543 auto current_device = guard_.current_device();
18544 if (C10_UNLIKELY(current_device.has_value())) {
18545 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18546 "structured kernels don't support multi-device outputs");
18547 } else {
18548 guard_.reset_device(options.device());
18549 }
18550 outputs_[output_idx] = create_out(sizes, strides, options);
18551 if (!names.empty()) {
18552 namedinference::propagate_names(*outputs_[output_idx], names);
18553 }
18554 // super must happen after, so that downstream can use maybe_get_output
18555 // to retrieve the output
18556 at::meta::structured_logit_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
18557 }
18558 const Tensor& maybe_get_output(int64_t output_idx) override {
18559 return *outputs_[output_idx];
18560 }
18561 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18562 c10::OptionalDeviceGuard guard_;
18563};
18564at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
18565structured_logit_backward_default_backend_functional op;
18566op.meta(grad_output, self, eps);
18567at::logit_backward_outf(grad_output, self, eps, *op.outputs_[0]);
18568return std::move(op.outputs_[0]).take();
18569}
18570struct structured_tanh_backward_default_backend_functional final : public at::meta::structured_tanh_backward {
18571 void set_output_strided(
18572 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18573 TensorOptions options, DimnameList names
18574 ) override {
18575 auto current_device = guard_.current_device();
18576 if (C10_UNLIKELY(current_device.has_value())) {
18577 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18578 "structured kernels don't support multi-device outputs");
18579 } else {
18580 guard_.reset_device(options.device());
18581 }
18582 outputs_[output_idx] = create_out(sizes, strides, options);
18583 if (!names.empty()) {
18584 namedinference::propagate_names(*outputs_[output_idx], names);
18585 }
18586 // super must happen after, so that downstream can use maybe_get_output
18587 // to retrieve the output
18588 at::meta::structured_tanh_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
18589 }
18590 void set_output_raw_strided(
18591 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18592 TensorOptions options, DimnameList names
18593 ) override {
18594 auto current_device = guard_.current_device();
18595 if (C10_UNLIKELY(current_device.has_value())) {
18596 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18597 "structured kernels don't support multi-device outputs");
18598 } else {
18599 guard_.reset_device(options.device());
18600 }
18601 outputs_[output_idx] = create_out(sizes, strides, options);
18602 if (!names.empty()) {
18603 namedinference::propagate_names(*outputs_[output_idx], names);
18604 }
18605 // super must happen after, so that downstream can use maybe_get_output
18606 // to retrieve the output
18607 at::meta::structured_tanh_backward::set_output_raw_strided(output_idx, sizes, strides, options, names);
18608 }
18609 const Tensor& maybe_get_output(int64_t output_idx) override {
18610 return *outputs_[output_idx];
18611 }
18612 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18613 c10::OptionalDeviceGuard guard_;
18614};
18615at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
18616structured_tanh_backward_default_backend_functional op;
18617op.meta(grad_output, output);
18618at::tanh_backward_outf(grad_output, output, *op.outputs_[0]);
18619return std::move(op.outputs_[0]).take();
18620}
18621struct structured_slow_conv_transpose2d_default_backend_functional final : public at::meta::structured_slow_conv_transpose2d {
18622 void set_output_strided(
18623 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18624 TensorOptions options, DimnameList names
18625 ) override {
18626 auto current_device = guard_.current_device();
18627 if (C10_UNLIKELY(current_device.has_value())) {
18628 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18629 "structured kernels don't support multi-device outputs");
18630 } else {
18631 guard_.reset_device(options.device());
18632 }
18633 outputs_[output_idx] = create_out(sizes, strides, options);
18634 if (!names.empty()) {
18635 namedinference::propagate_names(*outputs_[output_idx], names);
18636 }
18637 // super must happen after, so that downstream can use maybe_get_output
18638 // to retrieve the output
18639 }
18640 void set_output_raw_strided(
18641 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18642 TensorOptions options, DimnameList names
18643 ) override {
18644 auto current_device = guard_.current_device();
18645 if (C10_UNLIKELY(current_device.has_value())) {
18646 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18647 "structured kernels don't support multi-device outputs");
18648 } else {
18649 guard_.reset_device(options.device());
18650 }
18651 outputs_[output_idx] = create_out(sizes, strides, options);
18652 if (!names.empty()) {
18653 namedinference::propagate_names(*outputs_[output_idx], names);
18654 }
18655 // super must happen after, so that downstream can use maybe_get_output
18656 // to retrieve the output
18657 }
18658 const Tensor& maybe_get_output(int64_t output_idx) override {
18659 return *outputs_[output_idx];
18660 }
18661 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18662 c10::OptionalDeviceGuard guard_;
18663};
18664at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
18665structured_slow_conv_transpose2d_default_backend_functional op;
18666op.meta(self, weight, kernel_size, ((bias.has_value() && (*bias).defined()) ? at::OptionalTensorRef(*bias) : at::OptionalTensorRef()), stride, padding, output_padding, dilation);
18667at::slow_conv_transpose2d_outf(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, *op.outputs_[0]);
18668return std::move(op.outputs_[0]).take();
18669}
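// [Editorial note] For ops with an optional Tensor argument, such as
// slow_conv_transpose2d above, the generated wrapper unwraps the
// c10::optional<Tensor> bias into an at::OptionalTensorRef (a borrowed,
// possibly-empty reference) before calling meta(), but passes the original
// optional straight through to the *_outf call.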
18670struct structured_isposinf_default_backend_functional final : public at::meta::structured_isposinf {
18671 void set_output_strided(
18672 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18673 TensorOptions options, DimnameList names
18674 ) override {
18675 auto current_device = guard_.current_device();
18676 if (C10_UNLIKELY(current_device.has_value())) {
18677 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18678 "structured kernels don't support multi-device outputs");
18679 } else {
18680 guard_.reset_device(options.device());
18681 }
18682 outputs_[output_idx] = create_out(sizes, strides, options);
18683 if (!names.empty()) {
18684 namedinference::propagate_names(*outputs_[output_idx], names);
18685 }
18686 // super must happen after, so that downstream can use maybe_get_output
18687 // to retrieve the output
18688 at::meta::structured_isposinf::set_output_raw_strided(output_idx, sizes, strides, options, names);
18689 }
18690 void set_output_raw_strided(
18691 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18692 TensorOptions options, DimnameList names
18693 ) override {
18694 auto current_device = guard_.current_device();
18695 if (C10_UNLIKELY(current_device.has_value())) {
18696 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18697 "structured kernels don't support multi-device outputs");
18698 } else {
18699 guard_.reset_device(options.device());
18700 }
18701 outputs_[output_idx] = create_out(sizes, strides, options);
18702 if (!names.empty()) {
18703 namedinference::propagate_names(*outputs_[output_idx], names);
18704 }
18705 // super must happen after, so that downstream can use maybe_get_output
18706 // to retrieve the output
18707 at::meta::structured_isposinf::set_output_raw_strided(output_idx, sizes, strides, options, names);
18708 }
18709 const Tensor& maybe_get_output(int64_t output_idx) override {
18710 return *outputs_[output_idx];
18711 }
18712 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18713 c10::OptionalDeviceGuard guard_;
18714};
18715at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_isposinf(const at::Tensor & self) {
18716structured_isposinf_default_backend_functional op;
18717op.meta(self);
18718at::isposinf_outf(self, *op.outputs_[0]);
18719return std::move(op.outputs_[0]).take();
18720}
18721struct structured_isneginf_default_backend_functional final : public at::meta::structured_isneginf {
18722 void set_output_strided(
18723 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18724 TensorOptions options, DimnameList names
18725 ) override {
18726 auto current_device = guard_.current_device();
18727 if (C10_UNLIKELY(current_device.has_value())) {
18728 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18729 "structured kernels don't support multi-device outputs");
18730 } else {
18731 guard_.reset_device(options.device());
18732 }
18733 outputs_[output_idx] = create_out(sizes, strides, options);
18734 if (!names.empty()) {
18735 namedinference::propagate_names(*outputs_[output_idx], names);
18736 }
18737 // super must happen after, so that downstream can use maybe_get_output
18738 // to retrieve the output
18739 at::meta::structured_isneginf::set_output_raw_strided(output_idx, sizes, strides, options, names);
18740 }
18741 void set_output_raw_strided(
18742 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18743 TensorOptions options, DimnameList names
18744 ) override {
18745 auto current_device = guard_.current_device();
18746 if (C10_UNLIKELY(current_device.has_value())) {
18747 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18748 "structured kernels don't support multi-device outputs");
18749 } else {
18750 guard_.reset_device(options.device());
18751 }
18752 outputs_[output_idx] = create_out(sizes, strides, options);
18753 if (!names.empty()) {
18754 namedinference::propagate_names(*outputs_[output_idx], names);
18755 }
18756 // super must happen after, so that downstream can use maybe_get_output
18757 // to retrieve the output
18758 at::meta::structured_isneginf::set_output_raw_strided(output_idx, sizes, strides, options, names);
18759 }
18760 const Tensor& maybe_get_output(int64_t output_idx) override {
18761 return *outputs_[output_idx];
18762 }
18763 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18764 c10::OptionalDeviceGuard guard_;
18765};
18766at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_isneginf(const at::Tensor & self) {
18767structured_isneginf_default_backend_functional op;
18768op.meta(self);
18769at::isneginf_outf(self, *op.outputs_[0]);
18770return std::move(op.outputs_[0]).take();
18771}
18772struct structured_special_entr_default_backend_functional final : public at::meta::structured_special_entr {
18773 void set_output_strided(
18774 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18775 TensorOptions options, DimnameList names
18776 ) override {
18777 auto current_device = guard_.current_device();
18778 if (C10_UNLIKELY(current_device.has_value())) {
18779 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18780 "structured kernels don't support multi-device outputs");
18781 } else {
18782 guard_.reset_device(options.device());
18783 }
18784 outputs_[output_idx] = create_out(sizes, strides, options);
18785 if (!names.empty()) {
18786 namedinference::propagate_names(*outputs_[output_idx], names);
18787 }
18788 // super must happen after, so that downstream can use maybe_get_output
18789 // to retrieve the output
18790 at::meta::structured_special_entr::set_output_raw_strided(output_idx, sizes, strides, options, names);
18791 }
18792 void set_output_raw_strided(
18793 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18794 TensorOptions options, DimnameList names
18795 ) override {
18796 auto current_device = guard_.current_device();
18797 if (C10_UNLIKELY(current_device.has_value())) {
18798 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18799 "structured kernels don't support multi-device outputs");
18800 } else {
18801 guard_.reset_device(options.device());
18802 }
18803 outputs_[output_idx] = create_out(sizes, strides, options);
18804 if (!names.empty()) {
18805 namedinference::propagate_names(*outputs_[output_idx], names);
18806 }
18807 // super must happen after, so that downstream can use maybe_get_output
18808 // to retrieve the output
18809 at::meta::structured_special_entr::set_output_raw_strided(output_idx, sizes, strides, options, names);
18810 }
18811 const Tensor& maybe_get_output(int64_t output_idx) override {
18812 return *outputs_[output_idx];
18813 }
18814 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18815 c10::OptionalDeviceGuard guard_;
18816};
18817at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_entr(const at::Tensor & self) {
18818structured_special_entr_default_backend_functional op;
18819op.meta(self);
18820at::special_entr_outf(self, *op.outputs_[0]);
18821return std::move(op.outputs_[0]).take();
18822}
18823struct structured_special_ndtri_default_backend_functional final : public at::meta::structured_special_ndtri {
18824 void set_output_strided(
18825 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18826 TensorOptions options, DimnameList names
18827 ) override {
18828 auto current_device = guard_.current_device();
18829 if (C10_UNLIKELY(current_device.has_value())) {
18830 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18831 "structured kernels don't support multi-device outputs");
18832 } else {
18833 guard_.reset_device(options.device());
18834 }
18835 outputs_[output_idx] = create_out(sizes, strides, options);
18836 if (!names.empty()) {
18837 namedinference::propagate_names(*outputs_[output_idx], names);
18838 }
18839 // super must happen after, so that downstream can use maybe_get_output
18840 // to retrieve the output
18841 at::meta::structured_special_ndtri::set_output_raw_strided(output_idx, sizes, strides, options, names);
18842 }
18843 void set_output_raw_strided(
18844 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18845 TensorOptions options, DimnameList names
18846 ) override {
18847 auto current_device = guard_.current_device();
18848 if (C10_UNLIKELY(current_device.has_value())) {
18849 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18850 "structured kernels don't support multi-device outputs");
18851 } else {
18852 guard_.reset_device(options.device());
18853 }
18854 outputs_[output_idx] = create_out(sizes, strides, options);
18855 if (!names.empty()) {
18856 namedinference::propagate_names(*outputs_[output_idx], names);
18857 }
18858 // super must happen after, so that downstream can use maybe_get_output
18859 // to retrieve the output
18860 at::meta::structured_special_ndtri::set_output_raw_strided(output_idx, sizes, strides, options, names);
18861 }
18862 const Tensor& maybe_get_output(int64_t output_idx) override {
18863 return *outputs_[output_idx];
18864 }
18865 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18866 c10::OptionalDeviceGuard guard_;
18867};
18868at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_ndtri(const at::Tensor & self) {
18869structured_special_ndtri_default_backend_functional op;
18870op.meta(self);
18871at::special_ndtri_outf(self, *op.outputs_[0]);
18872return std::move(op.outputs_[0]).take();
18873}
18874struct structured_special_log_ndtr_default_backend_functional final : public at::meta::structured_special_log_ndtr {
18875 void set_output_strided(
18876 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18877 TensorOptions options, DimnameList names
18878 ) override {
18879 auto current_device = guard_.current_device();
18880 if (C10_UNLIKELY(current_device.has_value())) {
18881 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18882 "structured kernels don't support multi-device outputs");
18883 } else {
18884 guard_.reset_device(options.device());
18885 }
18886 outputs_[output_idx] = create_out(sizes, strides, options);
18887 if (!names.empty()) {
18888 namedinference::propagate_names(*outputs_[output_idx], names);
18889 }
18890 // super must happen after, so that downstream can use maybe_get_output
18891 // to retrieve the output
18892 at::meta::structured_special_log_ndtr::set_output_raw_strided(output_idx, sizes, strides, options, names);
18893 }
18894 void set_output_raw_strided(
18895 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18896 TensorOptions options, DimnameList names
18897 ) override {
18898 auto current_device = guard_.current_device();
18899 if (C10_UNLIKELY(current_device.has_value())) {
18900 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18901 "structured kernels don't support multi-device outputs");
18902 } else {
18903 guard_.reset_device(options.device());
18904 }
18905 outputs_[output_idx] = create_out(sizes, strides, options);
18906 if (!names.empty()) {
18907 namedinference::propagate_names(*outputs_[output_idx], names);
18908 }
18909 // super must happen after, so that downstream can use maybe_get_output
18910 // to retrieve the output
18911 at::meta::structured_special_log_ndtr::set_output_raw_strided(output_idx, sizes, strides, options, names);
18912 }
18913 const Tensor& maybe_get_output(int64_t output_idx) override {
18914 return *outputs_[output_idx];
18915 }
18916 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18917 c10::OptionalDeviceGuard guard_;
18918};
18919at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_log_ndtr(const at::Tensor & self) {
18920structured_special_log_ndtr_default_backend_functional op;
18921op.meta(self);
18922at::special_log_ndtr_outf(self, *op.outputs_[0]);
18923return std::move(op.outputs_[0]).take();
18924}
18925struct structured_special_erfcx_default_backend_functional final : public at::meta::structured_special_erfcx {
18926 void set_output_strided(
18927 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18928 TensorOptions options, DimnameList names
18929 ) override {
18930 auto current_device = guard_.current_device();
18931 if (C10_UNLIKELY(current_device.has_value())) {
18932 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18933 "structured kernels don't support multi-device outputs");
18934 } else {
18935 guard_.reset_device(options.device());
18936 }
18937 outputs_[output_idx] = create_out(sizes, strides, options);
18938 if (!names.empty()) {
18939 namedinference::propagate_names(*outputs_[output_idx], names);
18940 }
18941 // super must happen after, so that downstream can use maybe_get_output
18942 // to retrieve the output
18943 at::meta::structured_special_erfcx::set_output_raw_strided(output_idx, sizes, strides, options, names);
18944 }
18945 void set_output_raw_strided(
18946 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18947 TensorOptions options, DimnameList names
18948 ) override {
18949 auto current_device = guard_.current_device();
18950 if (C10_UNLIKELY(current_device.has_value())) {
18951 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18952 "structured kernels don't support multi-device outputs");
18953 } else {
18954 guard_.reset_device(options.device());
18955 }
18956 outputs_[output_idx] = create_out(sizes, strides, options);
18957 if (!names.empty()) {
18958 namedinference::propagate_names(*outputs_[output_idx], names);
18959 }
18960 // super must happen after, so that downstream can use maybe_get_output
18961 // to retrieve the output
18962 at::meta::structured_special_erfcx::set_output_raw_strided(output_idx, sizes, strides, options, names);
18963 }
18964 const Tensor& maybe_get_output(int64_t output_idx) override {
18965 return *outputs_[output_idx];
18966 }
18967 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18968 c10::OptionalDeviceGuard guard_;
18969};
18970at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_erfcx(const at::Tensor & self) {
18971structured_special_erfcx_default_backend_functional op;
18972op.meta(self);
18973at::special_erfcx_outf(self, *op.outputs_[0]);
18974return std::move(op.outputs_[0]).take();
18975}
18976struct structured_special_xlog1py_default_backend_functional final : public at::meta::structured_special_xlog1py {
18977 void set_output_strided(
18978 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18979 TensorOptions options, DimnameList names
18980 ) override {
18981 auto current_device = guard_.current_device();
18982 if (C10_UNLIKELY(current_device.has_value())) {
18983 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
18984 "structured kernels don't support multi-device outputs");
18985 } else {
18986 guard_.reset_device(options.device());
18987 }
18988 outputs_[output_idx] = create_out(sizes, strides, options);
18989 if (!names.empty()) {
18990 namedinference::propagate_names(*outputs_[output_idx], names);
18991 }
18992 // super must happen after, so that downstream can use maybe_get_output
18993 // to retrieve the output
18994 at::meta::structured_special_xlog1py::set_output_raw_strided(output_idx, sizes, strides, options, names);
18995 }
18996 void set_output_raw_strided(
18997 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18998 TensorOptions options, DimnameList names
18999 ) override {
19000 auto current_device = guard_.current_device();
19001 if (C10_UNLIKELY(current_device.has_value())) {
19002 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19003 "structured kernels don't support multi-device outputs");
19004 } else {
19005 guard_.reset_device(options.device());
19006 }
19007 outputs_[output_idx] = create_out(sizes, strides, options);
19008 if (!names.empty()) {
19009 namedinference::propagate_names(*outputs_[output_idx], names);
19010 }
19011 // super must happen after, so that downstream can use maybe_get_output
19012 // to retrieve the output
19013 at::meta::structured_special_xlog1py::set_output_raw_strided(output_idx, sizes, strides, options, names);
19014 }
19015 const Tensor& maybe_get_output(int64_t output_idx) override {
19016 return *outputs_[output_idx];
19017 }
19018 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19019 c10::OptionalDeviceGuard guard_;
19020};
19021at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_xlog1py(const at::Tensor & self, const at::Tensor & other) {
19022structured_special_xlog1py_default_backend_functional op;
19023op.meta(self, other);
19024at::special_xlog1py_outf(self, other, *op.outputs_[0]);
19025return std::move(op.outputs_[0]).take();
19026}
19027struct structured_special_zeta_default_backend_functional final : public at::meta::structured_special_zeta {
19028 void set_output_strided(
19029 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19030 TensorOptions options, DimnameList names
19031 ) override {
19032 auto current_device = guard_.current_device();
19033 if (C10_UNLIKELY(current_device.has_value())) {
19034 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19035 "structured kernels don't support multi-device outputs");
19036 } else {
19037 guard_.reset_device(options.device());
19038 }
19039 outputs_[output_idx] = create_out(sizes, strides, options);
19040 if (!names.empty()) {
19041 namedinference::propagate_names(*outputs_[output_idx], names);
19042 }
19043 // super must happen after, so that downstream can use maybe_get_output
19044 // to retrieve the output
19045 at::meta::structured_special_zeta::set_output_raw_strided(output_idx, sizes, strides, options, names);
19046 }
19047 void set_output_raw_strided(
19048 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19049 TensorOptions options, DimnameList names
19050 ) override {
19051 auto current_device = guard_.current_device();
19052 if (C10_UNLIKELY(current_device.has_value())) {
19053 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19054 "structured kernels don't support multi-device outputs");
19055 } else {
19056 guard_.reset_device(options.device());
19057 }
19058 outputs_[output_idx] = create_out(sizes, strides, options);
19059 if (!names.empty()) {
19060 namedinference::propagate_names(*outputs_[output_idx], names);
19061 }
19062 // super must happen after, so that downstream can use maybe_get_output
19063 // to retrieve the output
19064 at::meta::structured_special_zeta::set_output_raw_strided(output_idx, sizes, strides, options, names);
19065 }
19066 const Tensor& maybe_get_output(int64_t output_idx) override {
19067 return *outputs_[output_idx];
19068 }
19069 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19070 c10::OptionalDeviceGuard guard_;
19071};
19072at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_zeta(const at::Tensor & self, const at::Tensor & other) {
19073structured_special_zeta_default_backend_functional op;
19074op.meta(self, other);
19075at::special_zeta_outf(self, other, *op.outputs_[0]);
19076return std::move(op.outputs_[0]).take();
19077}
19078struct structured_special_i0e_default_backend_functional final : public at::meta::structured_special_i0e {
19079 void set_output_strided(
19080 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19081 TensorOptions options, DimnameList names
19082 ) override {
19083 auto current_device = guard_.current_device();
19084 if (C10_UNLIKELY(current_device.has_value())) {
19085 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19086 "structured kernels don't support multi-device outputs");
19087 } else {
19088 guard_.reset_device(options.device());
19089 }
19090 outputs_[output_idx] = create_out(sizes, strides, options);
19091 if (!names.empty()) {
19092 namedinference::propagate_names(*outputs_[output_idx], names);
19093 }
19094 // super must happen after, so that downstream can use maybe_get_output
19095 // to retrieve the output
19096 at::meta::structured_special_i0e::set_output_raw_strided(output_idx, sizes, strides, options, names);
19097 }
19098 void set_output_raw_strided(
19099 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19100 TensorOptions options, DimnameList names
19101 ) override {
19102 auto current_device = guard_.current_device();
19103 if (C10_UNLIKELY(current_device.has_value())) {
19104 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19105 "structured kernels don't support multi-device outputs");
19106 } else {
19107 guard_.reset_device(options.device());
19108 }
19109 outputs_[output_idx] = create_out(sizes, strides, options);
19110 if (!names.empty()) {
19111 namedinference::propagate_names(*outputs_[output_idx], names);
19112 }
19113 // super must happen after, so that downstream can use maybe_get_output
19114 // to retrieve the output
19115 at::meta::structured_special_i0e::set_output_raw_strided(output_idx, sizes, strides, options, names);
19116 }
19117 const Tensor& maybe_get_output(int64_t output_idx) override {
19118 return *outputs_[output_idx];
19119 }
19120 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19121 c10::OptionalDeviceGuard guard_;
19122};
19123at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_i0e(const at::Tensor & self) {
19124structured_special_i0e_default_backend_functional op;
19125op.meta(self);
19126at::special_i0e_outf(self, *op.outputs_[0]);
19127return std::move(op.outputs_[0]).take();
19128}
19129struct structured_special_i1_default_backend_functional final : public at::meta::structured_special_i1 {
19130 void set_output_strided(
19131 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19132 TensorOptions options, DimnameList names
19133 ) override {
19134 auto current_device = guard_.current_device();
19135 if (C10_UNLIKELY(current_device.has_value())) {
19136 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19137 "structured kernels don't support multi-device outputs");
19138 } else {
19139 guard_.reset_device(options.device());
19140 }
19141 outputs_[output_idx] = create_out(sizes, strides, options);
19142 if (!names.empty()) {
19143 namedinference::propagate_names(*outputs_[output_idx], names);
19144 }
19145 // super must happen after, so that downstream can use maybe_get_output
19146 // to retrieve the output
19147 at::meta::structured_special_i1::set_output_raw_strided(output_idx, sizes, strides, options, names);
19148 }
19149 void set_output_raw_strided(
19150 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19151 TensorOptions options, DimnameList names
19152 ) override {
19153 auto current_device = guard_.current_device();
19154 if (C10_UNLIKELY(current_device.has_value())) {
19155 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19156 "structured kernels don't support multi-device outputs");
19157 } else {
19158 guard_.reset_device(options.device());
19159 }
19160 outputs_[output_idx] = create_out(sizes, strides, options);
19161 if (!names.empty()) {
19162 namedinference::propagate_names(*outputs_[output_idx], names);
19163 }
19164 // super must happen after, so that downstream can use maybe_get_output
19165 // to retrieve the output
19166 at::meta::structured_special_i1::set_output_raw_strided(output_idx, sizes, strides, options, names);
19167 }
19168 const Tensor& maybe_get_output(int64_t output_idx) override {
19169 return *outputs_[output_idx];
19170 }
19171 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19172 c10::OptionalDeviceGuard guard_;
19173};
19174at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_i1(const at::Tensor & self) {
19175structured_special_i1_default_backend_functional op;
19176op.meta(self);
19177at::special_i1_outf(self, *op.outputs_[0]);
19178return std::move(op.outputs_[0]).take();
19179}
19180struct structured_special_i1e_default_backend_functional final : public at::meta::structured_special_i1e {
19181 void set_output_strided(
19182 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19183 TensorOptions options, DimnameList names
19184 ) override {
19185 auto current_device = guard_.current_device();
19186 if (C10_UNLIKELY(current_device.has_value())) {
19187 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19188 "structured kernels don't support multi-device outputs");
19189 } else {
19190 guard_.reset_device(options.device());
19191 }
19192 outputs_[output_idx] = create_out(sizes, strides, options);
19193 if (!names.empty()) {
19194 namedinference::propagate_names(*outputs_[output_idx], names);
19195 }
19196 // super must happen after, so that downstream can use maybe_get_output
19197 // to retrieve the output
19198 at::meta::structured_special_i1e::set_output_raw_strided(output_idx, sizes, strides, options, names);
19199 }
19200 void set_output_raw_strided(
19201 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19202 TensorOptions options, DimnameList names
19203 ) override {
19204 auto current_device = guard_.current_device();
19205 if (C10_UNLIKELY(current_device.has_value())) {
19206 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19207 "structured kernels don't support multi-device outputs");
19208 } else {
19209 guard_.reset_device(options.device());
19210 }
19211 outputs_[output_idx] = create_out(sizes, strides, options);
19212 if (!names.empty()) {
19213 namedinference::propagate_names(*outputs_[output_idx], names);
19214 }
19215 // super must happen after, so that downstream can use maybe_get_output
19216 // to retrieve the output
19217 at::meta::structured_special_i1e::set_output_raw_strided(output_idx, sizes, strides, options, names);
19218 }
19219 const Tensor& maybe_get_output(int64_t output_idx) override {
19220 return *outputs_[output_idx];
19221 }
19222 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19223 c10::OptionalDeviceGuard guard_;
19224};
19225at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_i1e(const at::Tensor & self) {
19226structured_special_i1e_default_backend_functional op;
19227op.meta(self);
19228at::special_i1e_outf(self, *op.outputs_[0]);
19229return std::move(op.outputs_[0]).take();
19230}
19231struct structured_linalg_cholesky_ex_default_backend_functional final : public at::meta::structured_linalg_cholesky_ex {
19232 void set_output_strided(
19233 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19234 TensorOptions options, DimnameList names
19235 ) override {
19236 auto current_device = guard_.current_device();
19237 if (C10_UNLIKELY(current_device.has_value())) {
19238 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19239 "structured kernels don't support multi-device outputs");
19240 } else {
19241 guard_.reset_device(options.device());
19242 }
19243 outputs_[output_idx] = create_out(sizes, strides, options);
19244 if (!names.empty()) {
19245 namedinference::propagate_names(*outputs_[output_idx], names);
19246 }
19247 // super must happen after, so that downstream can use maybe_get_output
19248 // to retrieve the output
19249 }
19250 void set_output_raw_strided(
19251 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19252 TensorOptions options, DimnameList names
19253 ) override {
19254 auto current_device = guard_.current_device();
19255 if (C10_UNLIKELY(current_device.has_value())) {
19256 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19257 "structured kernels don't support multi-device outputs");
19258 } else {
19259 guard_.reset_device(options.device());
19260 }
19261 outputs_[output_idx] = create_out(sizes, strides, options);
19262 if (!names.empty()) {
19263 namedinference::propagate_names(*outputs_[output_idx], names);
19264 }
19265 // super must happen after, so that downstream can use maybe_get_output
19266 // to retrieve the output
19267 }
19268 const Tensor& maybe_get_output(int64_t output_idx) override {
19269 return *outputs_[output_idx];
19270 }
19271 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
19272 c10::OptionalDeviceGuard guard_;
19273};
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_linalg_cholesky_ex(const at::Tensor & self, bool upper, bool check_errors) {
structured_linalg_cholesky_ex_default_backend_functional op;
op.meta(self, upper, check_errors);
at::linalg_cholesky_ex_outf(self, upper, check_errors, *op.outputs_[0], *op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
}
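// NOTE: For ops with more than one result, outputs_ is sized to the number of
// outputs (2 here: the Cholesky factor and the info tensor), every slot is
// passed to the *_outf overload, and the released tensors are packed into a
// ::std::tuple in declaration order. The three- and four-output wrappers below
// (linalg_lu_factor_ex, _linalg_slogdet, _linalg_solve_ex, ...) follow the
// same scheme.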
19280struct structured_linalg_cross_default_backend_functional final : public at::meta::structured_linalg_cross {
19281 void set_output_strided(
19282 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19283 TensorOptions options, DimnameList names
19284 ) override {
19285 auto current_device = guard_.current_device();
19286 if (C10_UNLIKELY(current_device.has_value())) {
19287 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19288 "structured kernels don't support multi-device outputs");
19289 } else {
19290 guard_.reset_device(options.device());
19291 }
19292 outputs_[output_idx] = create_out(sizes, strides, options);
19293 if (!names.empty()) {
19294 namedinference::propagate_names(*outputs_[output_idx], names);
19295 }
19296 // super must happen after, so that downstream can use maybe_get_output
19297 // to retrieve the output
19298 }
19299 void set_output_raw_strided(
19300 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19301 TensorOptions options, DimnameList names
19302 ) override {
19303 auto current_device = guard_.current_device();
19304 if (C10_UNLIKELY(current_device.has_value())) {
19305 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19306 "structured kernels don't support multi-device outputs");
19307 } else {
19308 guard_.reset_device(options.device());
19309 }
19310 outputs_[output_idx] = create_out(sizes, strides, options);
19311 if (!names.empty()) {
19312 namedinference::propagate_names(*outputs_[output_idx], names);
19313 }
19314 // super must happen after, so that downstream can use maybe_get_output
19315 // to retrieve the output
19316 }
19317 const Tensor& maybe_get_output(int64_t output_idx) override {
19318 return *outputs_[output_idx];
19319 }
19320 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19321 c10::OptionalDeviceGuard guard_;
19322};
19323at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
19324structured_linalg_cross_default_backend_functional op;
19325op.meta(self, other, dim);
19326at::linalg_cross_outf(self, other, dim, *op.outputs_[0]);
19327return std::move(op.outputs_[0]).take();
19328}
19329struct structured_linalg_lu_factor_ex_default_backend_functional final : public at::meta::structured_linalg_lu_factor_ex {
19330 void set_output_strided(
19331 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19332 TensorOptions options, DimnameList names
19333 ) override {
19334 auto current_device = guard_.current_device();
19335 if (C10_UNLIKELY(current_device.has_value())) {
19336 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19337 "structured kernels don't support multi-device outputs");
19338 } else {
19339 guard_.reset_device(options.device());
19340 }
19341 outputs_[output_idx] = create_out(sizes, strides, options);
19342 if (!names.empty()) {
19343 namedinference::propagate_names(*outputs_[output_idx], names);
19344 }
19345 // super must happen after, so that downstream can use maybe_get_output
19346 // to retrieve the output
19347 }
19348 void set_output_raw_strided(
19349 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19350 TensorOptions options, DimnameList names
19351 ) override {
19352 auto current_device = guard_.current_device();
19353 if (C10_UNLIKELY(current_device.has_value())) {
19354 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19355 "structured kernels don't support multi-device outputs");
19356 } else {
19357 guard_.reset_device(options.device());
19358 }
19359 outputs_[output_idx] = create_out(sizes, strides, options);
19360 if (!names.empty()) {
19361 namedinference::propagate_names(*outputs_[output_idx], names);
19362 }
19363 // super must happen after, so that downstream can use maybe_get_output
19364 // to retrieve the output
19365 }
19366 const Tensor& maybe_get_output(int64_t output_idx) override {
19367 return *outputs_[output_idx];
19368 }
19369 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
19370 c10::OptionalDeviceGuard guard_;
19371};
19372::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu_factor_ex(const at::Tensor & A, bool pivot, bool check_errors) {
19373structured_linalg_lu_factor_ex_default_backend_functional op;
19374op.meta(A, pivot, check_errors);
19375at::linalg_lu_factor_ex_outf(A, pivot, check_errors, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
19376return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
19377}
19378struct structured_linalg_lu_default_backend_functional final : public at::meta::structured_linalg_lu {
19379 void set_output_strided(
19380 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19381 TensorOptions options, DimnameList names
19382 ) override {
19383 auto current_device = guard_.current_device();
19384 if (C10_UNLIKELY(current_device.has_value())) {
19385 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19386 "structured kernels don't support multi-device outputs");
19387 } else {
19388 guard_.reset_device(options.device());
19389 }
19390 outputs_[output_idx] = create_out(sizes, strides, options);
19391 if (!names.empty()) {
19392 namedinference::propagate_names(*outputs_[output_idx], names);
19393 }
19394 // super must happen after, so that downstream can use maybe_get_output
19395 // to retrieve the output
19396 }
19397 void set_output_raw_strided(
19398 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19399 TensorOptions options, DimnameList names
19400 ) override {
19401 auto current_device = guard_.current_device();
19402 if (C10_UNLIKELY(current_device.has_value())) {
19403 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19404 "structured kernels don't support multi-device outputs");
19405 } else {
19406 guard_.reset_device(options.device());
19407 }
19408 outputs_[output_idx] = create_out(sizes, strides, options);
19409 if (!names.empty()) {
19410 namedinference::propagate_names(*outputs_[output_idx], names);
19411 }
19412 // super must happen after, so that downstream can use maybe_get_output
19413 // to retrieve the output
19414 }
19415 const Tensor& maybe_get_output(int64_t output_idx) override {
19416 return *outputs_[output_idx];
19417 }
19418 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
19419 c10::OptionalDeviceGuard guard_;
19420};
19421::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu(const at::Tensor & A, bool pivot) {
19422structured_linalg_lu_default_backend_functional op;
19423op.meta(A, pivot);
19424at::linalg_lu_outf(A, pivot, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
19425return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
19426}
19427struct structured_linalg_lu_solve_default_backend_functional final : public at::meta::structured_linalg_lu_solve {
19428 void set_output_strided(
19429 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19430 TensorOptions options, DimnameList names
19431 ) override {
19432 auto current_device = guard_.current_device();
19433 if (C10_UNLIKELY(current_device.has_value())) {
19434 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19435 "structured kernels don't support multi-device outputs");
19436 } else {
19437 guard_.reset_device(options.device());
19438 }
19439 outputs_[output_idx] = create_out(sizes, strides, options);
19440 if (!names.empty()) {
19441 namedinference::propagate_names(*outputs_[output_idx], names);
19442 }
19443 // super must happen after, so that downstream can use maybe_get_output
19444 // to retrieve the output
19445 }
19446 void set_output_raw_strided(
19447 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19448 TensorOptions options, DimnameList names
19449 ) override {
19450 auto current_device = guard_.current_device();
19451 if (C10_UNLIKELY(current_device.has_value())) {
19452 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19453 "structured kernels don't support multi-device outputs");
19454 } else {
19455 guard_.reset_device(options.device());
19456 }
19457 outputs_[output_idx] = create_out(sizes, strides, options);
19458 if (!names.empty()) {
19459 namedinference::propagate_names(*outputs_[output_idx], names);
19460 }
19461 // super must happen after, so that downstream can use maybe_get_output
19462 // to retrieve the output
19463 }
19464 const Tensor& maybe_get_output(int64_t output_idx) override {
19465 return *outputs_[output_idx];
19466 }
19467 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19468 c10::OptionalDeviceGuard guard_;
19469};
19470at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
19471structured_linalg_lu_solve_default_backend_functional op;
19472op.meta(LU, pivots, B, left, adjoint);
19473at::linalg_lu_solve_outf(LU, pivots, B, left, adjoint, *op.outputs_[0]);
19474return std::move(op.outputs_[0]).take();
19475}
19476struct structured__linalg_det_default_backend_functional final : public at::meta::structured__linalg_det {
19477 void set_output_strided(
19478 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19479 TensorOptions options, DimnameList names
19480 ) override {
19481 auto current_device = guard_.current_device();
19482 if (C10_UNLIKELY(current_device.has_value())) {
19483 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19484 "structured kernels don't support multi-device outputs");
19485 } else {
19486 guard_.reset_device(options.device());
19487 }
19488 outputs_[output_idx] = create_out(sizes, strides, options);
19489 if (!names.empty()) {
19490 namedinference::propagate_names(*outputs_[output_idx], names);
19491 }
19492 // super must happen after, so that downstream can use maybe_get_output
19493 // to retrieve the output
19494 }
19495 void set_output_raw_strided(
19496 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19497 TensorOptions options, DimnameList names
19498 ) override {
19499 auto current_device = guard_.current_device();
19500 if (C10_UNLIKELY(current_device.has_value())) {
19501 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19502 "structured kernels don't support multi-device outputs");
19503 } else {
19504 guard_.reset_device(options.device());
19505 }
19506 outputs_[output_idx] = create_out(sizes, strides, options);
19507 if (!names.empty()) {
19508 namedinference::propagate_names(*outputs_[output_idx], names);
19509 }
19510 // super must happen after, so that downstream can use maybe_get_output
19511 // to retrieve the output
19512 }
19513 const Tensor& maybe_get_output(int64_t output_idx) override {
19514 return *outputs_[output_idx];
19515 }
19516 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
19517 c10::OptionalDeviceGuard guard_;
19518};
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional__linalg_det(const at::Tensor & A) {
structured__linalg_det_default_backend_functional op;
op.meta(A);
at::_linalg_det_outf(A, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
}
19525struct structured_linalg_ldl_factor_ex_default_backend_functional final : public at::meta::structured_linalg_ldl_factor_ex {
19526 void set_output_strided(
19527 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19528 TensorOptions options, DimnameList names
19529 ) override {
19530 auto current_device = guard_.current_device();
19531 if (C10_UNLIKELY(current_device.has_value())) {
19532 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19533 "structured kernels don't support multi-device outputs");
19534 } else {
19535 guard_.reset_device(options.device());
19536 }
19537 outputs_[output_idx] = create_out(sizes, strides, options);
19538 if (!names.empty()) {
19539 namedinference::propagate_names(*outputs_[output_idx], names);
19540 }
19541 // super must happen after, so that downstream can use maybe_get_output
19542 // to retrieve the output
19543 }
19544 void set_output_raw_strided(
19545 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19546 TensorOptions options, DimnameList names
19547 ) override {
19548 auto current_device = guard_.current_device();
19549 if (C10_UNLIKELY(current_device.has_value())) {
19550 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19551 "structured kernels don't support multi-device outputs");
19552 } else {
19553 guard_.reset_device(options.device());
19554 }
19555 outputs_[output_idx] = create_out(sizes, strides, options);
19556 if (!names.empty()) {
19557 namedinference::propagate_names(*outputs_[output_idx], names);
19558 }
19559 // super must happen after, so that downstream can use maybe_get_output
19560 // to retrieve the output
19561 }
19562 const Tensor& maybe_get_output(int64_t output_idx) override {
19563 return *outputs_[output_idx];
19564 }
19565 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
19566 c10::OptionalDeviceGuard guard_;
19567};
19568::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian, bool check_errors) {
19569structured_linalg_ldl_factor_ex_default_backend_functional op;
19570op.meta(self, hermitian, check_errors);
19571at::linalg_ldl_factor_ex_outf(self, hermitian, check_errors, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
19572return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
19573}
19574struct structured_linalg_ldl_solve_default_backend_functional final : public at::meta::structured_linalg_ldl_solve {
19575 void set_output_strided(
19576 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19577 TensorOptions options, DimnameList names
19578 ) override {
19579 auto current_device = guard_.current_device();
19580 if (C10_UNLIKELY(current_device.has_value())) {
19581 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19582 "structured kernels don't support multi-device outputs");
19583 } else {
19584 guard_.reset_device(options.device());
19585 }
19586 outputs_[output_idx] = create_out(sizes, strides, options);
19587 if (!names.empty()) {
19588 namedinference::propagate_names(*outputs_[output_idx], names);
19589 }
19590 // super must happen after, so that downstream can use maybe_get_output
19591 // to retrieve the output
19592 }
19593 void set_output_raw_strided(
19594 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19595 TensorOptions options, DimnameList names
19596 ) override {
19597 auto current_device = guard_.current_device();
19598 if (C10_UNLIKELY(current_device.has_value())) {
19599 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19600 "structured kernels don't support multi-device outputs");
19601 } else {
19602 guard_.reset_device(options.device());
19603 }
19604 outputs_[output_idx] = create_out(sizes, strides, options);
19605 if (!names.empty()) {
19606 namedinference::propagate_names(*outputs_[output_idx], names);
19607 }
19608 // super must happen after, so that downstream can use maybe_get_output
19609 // to retrieve the output
19610 }
19611 const Tensor& maybe_get_output(int64_t output_idx) override {
19612 return *outputs_[output_idx];
19613 }
19614 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19615 c10::OptionalDeviceGuard guard_;
19616};
19617at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
19618structured_linalg_ldl_solve_default_backend_functional op;
19619op.meta(LD, pivots, B, hermitian);
19620at::linalg_ldl_solve_outf(LD, pivots, B, hermitian, *op.outputs_[0]);
19621return std::move(op.outputs_[0]).take();
19622}
19623struct structured__linalg_slogdet_default_backend_functional final : public at::meta::structured__linalg_slogdet {
19624 void set_output_strided(
19625 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19626 TensorOptions options, DimnameList names
19627 ) override {
19628 auto current_device = guard_.current_device();
19629 if (C10_UNLIKELY(current_device.has_value())) {
19630 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19631 "structured kernels don't support multi-device outputs");
19632 } else {
19633 guard_.reset_device(options.device());
19634 }
19635 outputs_[output_idx] = create_out(sizes, strides, options);
19636 if (!names.empty()) {
19637 namedinference::propagate_names(*outputs_[output_idx], names);
19638 }
19639 // super must happen after, so that downstream can use maybe_get_output
19640 // to retrieve the output
19641 }
19642 void set_output_raw_strided(
19643 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19644 TensorOptions options, DimnameList names
19645 ) override {
19646 auto current_device = guard_.current_device();
19647 if (C10_UNLIKELY(current_device.has_value())) {
19648 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19649 "structured kernels don't support multi-device outputs");
19650 } else {
19651 guard_.reset_device(options.device());
19652 }
19653 outputs_[output_idx] = create_out(sizes, strides, options);
19654 if (!names.empty()) {
19655 namedinference::propagate_names(*outputs_[output_idx], names);
19656 }
19657 // super must happen after, so that downstream can use maybe_get_output
19658 // to retrieve the output
19659 }
19660 const Tensor& maybe_get_output(int64_t output_idx) override {
19661 return *outputs_[output_idx];
19662 }
19663 std::array<c10::ExclusivelyOwned<Tensor>, 4> outputs_;
19664 c10::OptionalDeviceGuard guard_;
19665};
19666::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional__linalg_slogdet(const at::Tensor & A) {
19667structured__linalg_slogdet_default_backend_functional op;
19668op.meta(A);
19669at::_linalg_slogdet_outf(A, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2], *op.outputs_[3]);
19670return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take(), std::move(op.outputs_[3]).take());
19671}
19672struct structured__linalg_eigh_default_backend_functional final : public at::meta::structured__linalg_eigh {
19673 void set_output_strided(
19674 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19675 TensorOptions options, DimnameList names
19676 ) override {
19677 auto current_device = guard_.current_device();
19678 if (C10_UNLIKELY(current_device.has_value())) {
19679 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19680 "structured kernels don't support multi-device outputs");
19681 } else {
19682 guard_.reset_device(options.device());
19683 }
19684 outputs_[output_idx] = create_out(sizes, strides, options);
19685 if (!names.empty()) {
19686 namedinference::propagate_names(*outputs_[output_idx], names);
19687 }
19688 // super must happen after, so that downstream can use maybe_get_output
19689 // to retrieve the output
19690 }
19691 void set_output_raw_strided(
19692 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19693 TensorOptions options, DimnameList names
19694 ) override {
19695 auto current_device = guard_.current_device();
19696 if (C10_UNLIKELY(current_device.has_value())) {
19697 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19698 "structured kernels don't support multi-device outputs");
19699 } else {
19700 guard_.reset_device(options.device());
19701 }
19702 outputs_[output_idx] = create_out(sizes, strides, options);
19703 if (!names.empty()) {
19704 namedinference::propagate_names(*outputs_[output_idx], names);
19705 }
19706 // super must happen after, so that downstream can use maybe_get_output
19707 // to retrieve the output
19708 }
19709 const Tensor& maybe_get_output(int64_t output_idx) override {
19710 return *outputs_[output_idx];
19711 }
19712 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
19713 c10::OptionalDeviceGuard guard_;
19714};
19715::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional__linalg_eigh(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
19716structured__linalg_eigh_default_backend_functional op;
19717op.meta(A, UPLO, compute_v);
19718at::_linalg_eigh_outf(A, UPLO, compute_v, *op.outputs_[0], *op.outputs_[1]);
19719return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
19720}
19721struct structured_linalg_inv_ex_default_backend_functional final : public at::meta::structured_linalg_inv_ex {
19722 void set_output_strided(
19723 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19724 TensorOptions options, DimnameList names
19725 ) override {
19726 auto current_device = guard_.current_device();
19727 if (C10_UNLIKELY(current_device.has_value())) {
19728 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19729 "structured kernels don't support multi-device outputs");
19730 } else {
19731 guard_.reset_device(options.device());
19732 }
19733 outputs_[output_idx] = create_out(sizes, strides, options);
19734 if (!names.empty()) {
19735 namedinference::propagate_names(*outputs_[output_idx], names);
19736 }
19737 // super must happen after, so that downstream can use maybe_get_output
19738 // to retrieve the output
19739 }
19740 void set_output_raw_strided(
19741 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19742 TensorOptions options, DimnameList names
19743 ) override {
19744 auto current_device = guard_.current_device();
19745 if (C10_UNLIKELY(current_device.has_value())) {
19746 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19747 "structured kernels don't support multi-device outputs");
19748 } else {
19749 guard_.reset_device(options.device());
19750 }
19751 outputs_[output_idx] = create_out(sizes, strides, options);
19752 if (!names.empty()) {
19753 namedinference::propagate_names(*outputs_[output_idx], names);
19754 }
19755 // super must happen after, so that downstream can use maybe_get_output
19756 // to retrieve the output
19757 }
19758 const Tensor& maybe_get_output(int64_t output_idx) override {
19759 return *outputs_[output_idx];
19760 }
19761 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
19762 c10::OptionalDeviceGuard guard_;
19763};
19764::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_linalg_inv_ex(const at::Tensor & A, bool check_errors) {
19765structured_linalg_inv_ex_default_backend_functional op;
19766op.meta(A, check_errors);
19767at::linalg_inv_ex_outf(A, check_errors, *op.outputs_[0], *op.outputs_[1]);
19768return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
19769}
19770struct structured_linalg_vector_norm_default_backend_functional final : public at::meta::structured_linalg_vector_norm {
19771 void set_output_strided(
19772 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19773 TensorOptions options, DimnameList names
19774 ) override {
19775 auto current_device = guard_.current_device();
19776 if (C10_UNLIKELY(current_device.has_value())) {
19777 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19778 "structured kernels don't support multi-device outputs");
19779 } else {
19780 guard_.reset_device(options.device());
19781 }
19782 outputs_[output_idx] = create_out(sizes, strides, options);
19783 if (!names.empty()) {
19784 namedinference::propagate_names(*outputs_[output_idx], names);
19785 }
19786 // super must happen after, so that downstream can use maybe_get_output
19787 // to retrieve the output
19788 }
19789 void set_output_raw_strided(
19790 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19791 TensorOptions options, DimnameList names
19792 ) override {
19793 auto current_device = guard_.current_device();
19794 if (C10_UNLIKELY(current_device.has_value())) {
19795 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19796 "structured kernels don't support multi-device outputs");
19797 } else {
19798 guard_.reset_device(options.device());
19799 }
19800 outputs_[output_idx] = create_out(sizes, strides, options);
19801 if (!names.empty()) {
19802 namedinference::propagate_names(*outputs_[output_idx], names);
19803 }
19804 // super must happen after, so that downstream can use maybe_get_output
19805 // to retrieve the output
19806 }
19807 const Tensor& maybe_get_output(int64_t output_idx) override {
19808 return *outputs_[output_idx];
19809 }
19810 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19811 c10::OptionalDeviceGuard guard_;
19812};
19813at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
19814structured_linalg_vector_norm_default_backend_functional op;
19815op.meta(self, ord, dim, keepdim, dtype);
19816at::linalg_vector_norm_outf(self, ord, dim, keepdim, dtype, *op.outputs_[0]);
19817return std::move(op.outputs_[0]).take();
19818}
19819struct structured__linalg_svd_default_backend_functional final : public at::meta::structured__linalg_svd {
19820 void set_output_strided(
19821 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19822 TensorOptions options, DimnameList names
19823 ) override {
19824 auto current_device = guard_.current_device();
19825 if (C10_UNLIKELY(current_device.has_value())) {
19826 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19827 "structured kernels don't support multi-device outputs");
19828 } else {
19829 guard_.reset_device(options.device());
19830 }
19831 outputs_[output_idx] = create_out(sizes, strides, options);
19832 if (!names.empty()) {
19833 namedinference::propagate_names(*outputs_[output_idx], names);
19834 }
19835 // super must happen after, so that downstream can use maybe_get_output
19836 // to retrieve the output
19837 }
19838 void set_output_raw_strided(
19839 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19840 TensorOptions options, DimnameList names
19841 ) override {
19842 auto current_device = guard_.current_device();
19843 if (C10_UNLIKELY(current_device.has_value())) {
19844 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19845 "structured kernels don't support multi-device outputs");
19846 } else {
19847 guard_.reset_device(options.device());
19848 }
19849 outputs_[output_idx] = create_out(sizes, strides, options);
19850 if (!names.empty()) {
19851 namedinference::propagate_names(*outputs_[output_idx], names);
19852 }
19853 // super must happen after, so that downstream can use maybe_get_output
19854 // to retrieve the output
19855 }
19856 const Tensor& maybe_get_output(int64_t output_idx) override {
19857 return *outputs_[output_idx];
19858 }
19859 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
19860 c10::OptionalDeviceGuard guard_;
19861};
19862::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional__linalg_svd(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
19863structured__linalg_svd_default_backend_functional op;
19864op.meta(A, full_matrices, compute_uv, driver);
19865at::_linalg_svd_outf(A, full_matrices, compute_uv, driver, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
19866return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
19867}
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_atol_rtol_tensor_linalg_pinv(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_pinv(self, atol, rtol, hermitian);
}
} // anonymous namespace
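// NOTE: Unlike the structured wrappers above, this kernel simply forwards to
// the composite implementation in at::native. No device check or device guard
// is emitted because at::native::linalg_pinv is assumed to be written entirely
// in terms of other dispatched ATen ops, which handle device placement
// themselves when they are invoked.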
19875struct structured__linalg_solve_ex_default_backend_functional final : public at::meta::structured__linalg_solve_ex {
19876 void set_output_strided(
19877 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19878 TensorOptions options, DimnameList names
19879 ) override {
19880 auto current_device = guard_.current_device();
19881 if (C10_UNLIKELY(current_device.has_value())) {
19882 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19883 "structured kernels don't support multi-device outputs");
19884 } else {
19885 guard_.reset_device(options.device());
19886 }
19887 outputs_[output_idx] = create_out(sizes, strides, options);
19888 if (!names.empty()) {
19889 namedinference::propagate_names(*outputs_[output_idx], names);
19890 }
19891 // super must happen after, so that downstream can use maybe_get_output
19892 // to retrieve the output
19893 }
19894 void set_output_raw_strided(
19895 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19896 TensorOptions options, DimnameList names
19897 ) override {
19898 auto current_device = guard_.current_device();
19899 if (C10_UNLIKELY(current_device.has_value())) {
19900 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19901 "structured kernels don't support multi-device outputs");
19902 } else {
19903 guard_.reset_device(options.device());
19904 }
19905 outputs_[output_idx] = create_out(sizes, strides, options);
19906 if (!names.empty()) {
19907 namedinference::propagate_names(*outputs_[output_idx], names);
19908 }
19909 // super must happen after, so that downstream can use maybe_get_output
19910 // to retrieve the output
19911 }
19912 const Tensor& maybe_get_output(int64_t output_idx) override {
19913 return *outputs_[output_idx];
19914 }
19915 std::array<c10::ExclusivelyOwned<Tensor>, 4> outputs_;
19916 c10::OptionalDeviceGuard guard_;
19917};
19918::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional__linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
19919structured__linalg_solve_ex_default_backend_functional op;
19920op.meta(A, B, left, check_errors);
19921at::_linalg_solve_ex_outf(A, B, left, check_errors, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2], *op.outputs_[3]);
19922return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take(), std::move(op.outputs_[3]).take());
19923}
19924struct structured_linalg_qr_default_backend_functional final : public at::meta::structured_linalg_qr {
19925 void set_output_strided(
19926 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19927 TensorOptions options, DimnameList names
19928 ) override {
19929 auto current_device = guard_.current_device();
19930 if (C10_UNLIKELY(current_device.has_value())) {
19931 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19932 "structured kernels don't support multi-device outputs");
19933 } else {
19934 guard_.reset_device(options.device());
19935 }
19936 outputs_[output_idx] = create_out(sizes, strides, options);
19937 if (!names.empty()) {
19938 namedinference::propagate_names(*outputs_[output_idx], names);
19939 }
19940 // super must happen after, so that downstream can use maybe_get_output
19941 // to retrieve the output
19942 }
19943 void set_output_raw_strided(
19944 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19945 TensorOptions options, DimnameList names
19946 ) override {
19947 auto current_device = guard_.current_device();
19948 if (C10_UNLIKELY(current_device.has_value())) {
19949 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
19950 "structured kernels don't support multi-device outputs");
19951 } else {
19952 guard_.reset_device(options.device());
19953 }
19954 outputs_[output_idx] = create_out(sizes, strides, options);
19955 if (!names.empty()) {
19956 namedinference::propagate_names(*outputs_[output_idx], names);
19957 }
19958 // super must happen after, so that downstream can use maybe_get_output
19959 // to retrieve the output
19960 }
19961 const Tensor& maybe_get_output(int64_t output_idx) override {
19962 return *outputs_[output_idx];
19963 }
19964 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
19965 c10::OptionalDeviceGuard guard_;
19966};
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_linalg_qr(const at::Tensor & A, c10::string_view mode) {
structured_linalg_qr_default_backend_functional op;
op.meta(A, mode);
at::linalg_qr_outf(A, mode, *op.outputs_[0], *op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
}
19973namespace {
19974at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___test_autograd_multiple_dispatch_view_copy(const at::Tensor & self) {
19975 // No device check
19976 // DeviceGuard omitted
19977 return at::native::_test_autograd_multiple_dispatch_view_copy(self);
19978}
19979} // anonymous namespace
19980namespace {
19981at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___fw_primal_copy(const at::Tensor & self, int64_t level) {
19982 // No device check
19983 // DeviceGuard omitted
19984 return at::native::_fw_primal_copy(self, level);
19985}
19986} // anonymous namespace
19987namespace {
19988at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___make_dual_copy(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
19989 // No device check
19990 // DeviceGuard omitted
19991 return at::native::_make_dual_copy(primal, tangent, level);
19992}
19993} // anonymous namespace
19994namespace {
19995at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__view_as_real_copy(const at::Tensor & self) {
19996 // No device check
19997 // DeviceGuard omitted
19998 return at::native::view_as_real_copy(self);
19999}
20000} // anonymous namespace
20001namespace {
20002at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__view_as_complex_copy(const at::Tensor & self) {
20003 // No device check
20004 // DeviceGuard omitted
20005 return at::native::view_as_complex_copy(self);
20006}
20007} // anonymous namespace
20008namespace {
20009at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___conj_copy(const at::Tensor & self) {
20010 // No device check
20011 // DeviceGuard omitted
20012 return at::native::_conj_copy(self);
20013}
20014} // anonymous namespace
20015namespace {
20016at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___neg_view_copy(const at::Tensor & self) {
20017 // No device check
20018 // DeviceGuard omitted
20019 return at::native::_neg_view_copy(self);
20020}
20021} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__as_strided_copy(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::as_strided_copy_symint(self, size, stride, storage_offset);
}
} // anonymous namespace
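// NOTE: Wrappers whose schema uses SymInt (as_strided_copy, expand_copy,
// view_copy, ...) take c10::SymIntArrayRef / c10::SymInt and forward to the
// *_symint variant of the native function. With ordinary eager tensors each
// SymInt simply wraps a concrete int64_t (e.g. sizes written as {2, 3} at a
// call site), while under symbolic shape tracing the same wrapper receives
// genuinely symbolic sizes; the native *_symint implementation is assumed to
// handle both cases.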
20029namespace {
20030at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___sparse_broadcast_to_copy(const at::Tensor & self, at::IntArrayRef size) {
20031 // No device check
20032 // DeviceGuard omitted
20033 return at::native::_sparse_broadcast_to_copy(self, size);
20034}
20035} // anonymous namespace
20036namespace {
20037at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__diagonal_copy(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
20038 // No device check
20039 // DeviceGuard omitted
20040 return at::native::diagonal_copy(self, offset, dim1, dim2);
20041}
20042} // anonymous namespace
20043namespace {
20044at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__expand_copy(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
20045 // No device check
20046 // DeviceGuard omitted
20047 return at::native::expand_copy_symint(self, size, implicit);
20048}
20049} // anonymous namespace
20050namespace {
20051at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__permute_copy(const at::Tensor & self, at::IntArrayRef dims) {
20052 // No device check
20053 // DeviceGuard omitted
20054 return at::native::permute_copy(self, dims);
20055}
20056} // anonymous namespace
20057namespace {
20058at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___reshape_alias_copy(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
20059 // No device check
20060 // DeviceGuard omitted
20061 return at::native::_reshape_alias_copy_symint(self, size, stride);
20062}
20063} // anonymous namespace
20064namespace {
20065at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_int_select_copy(const at::Tensor & self, int64_t dim, c10::SymInt index) {
20066 // No device check
20067 // DeviceGuard omitted
20068 return at::native::select_copy_symint(self, dim, index);
20069}
20070} // anonymous namespace
20071namespace {
20072at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__detach_copy(const at::Tensor & self) {
20073 // No device check
20074 // DeviceGuard omitted
20075 return at::native::detach_copy(self);
20076}
20077} // anonymous namespace
20078namespace {
20079at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_Tensor_slice_copy(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
20080 // No device check
20081 // DeviceGuard omitted
20082 return at::native::slice_copy_Tensor_symint(self, dim, start, end, step);
20083}
20084} // anonymous namespace
20085namespace {
20086::std::vector<at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_Tensor_split_copy(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
20087 // No device check
20088 // DeviceGuard omitted
20089 return at::native::split_copy_Tensor_symint(self, split_size, dim);
20090}
20091} // anonymous namespace
20092namespace {
20093::std::vector<at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional__split_with_sizes_copy(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
20094 // No device check
20095 // DeviceGuard omitted
20096 return at::native::split_with_sizes_copy_symint(self, split_sizes, dim);
20097}
20098} // anonymous namespace
20099namespace {
20100at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__squeeze_copy(const at::Tensor & self) {
20101 // No device check
20102 // DeviceGuard omitted
20103 return at::native::squeeze_copy(self);
20104}
20105} // anonymous namespace
20106namespace {
20107at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_dim_squeeze_copy(const at::Tensor & self, int64_t dim) {
20108 // No device check
20109 // DeviceGuard omitted
20110 return at::native::squeeze_copy_dim(self, dim);
20111}
20112} // anonymous namespace
20113namespace {
20114at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_dims_squeeze_copy(const at::Tensor & self, at::IntArrayRef dim) {
20115 // No device check
20116 // DeviceGuard omitted
20117 return at::native::squeeze_copy_dims(self, dim);
20118}
20119} // anonymous namespace
20120namespace {
20121at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__t_copy(const at::Tensor & self) {
20122 // No device check
20123 // DeviceGuard omitted
20124 return at::native::t_copy(self);
20125}
20126} // anonymous namespace
20127namespace {
20128at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_int_transpose_copy(const at::Tensor & self, int64_t dim0, int64_t dim1) {
20129 // No device check
20130 // DeviceGuard omitted
20131 return at::native::transpose_copy_int(self, dim0, dim1);
20132}
20133} // anonymous namespace
20134namespace {
20135at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__unsqueeze_copy(const at::Tensor & self, int64_t dim) {
20136 // No device check
20137 // DeviceGuard omitted
20138 return at::native::unsqueeze_copy(self, dim);
20139}
20140} // anonymous namespace
20141namespace {
20142at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___indices_copy(const at::Tensor & self) {
20143 // No device check
20144 // DeviceGuard omitted
20145 return at::native::_indices_copy(self);
20146}
20147} // anonymous namespace
20148namespace {
20149at::Tensor wrapper_CompositeExplicitAutogradNonFunctional___values_copy(const at::Tensor & self) {
20150 // No device check
20151 // DeviceGuard omitted
20152 return at::native::_values_copy(self);
20153}
20154} // anonymous namespace
20155namespace {
20156at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__indices_copy(const at::Tensor & self) {
20157 // No device check
20158 // DeviceGuard omitted
20159 return at::native::indices_copy(self);
20160}
20161} // anonymous namespace
20162namespace {
20163at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__values_copy(const at::Tensor & self) {
20164 // No device check
20165 // DeviceGuard omitted
20166 return at::native::values_copy(self);
20167}
20168} // anonymous namespace
20169namespace {
20170at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__crow_indices_copy(const at::Tensor & self) {
20171 // No device check
20172 // DeviceGuard omitted
20173 return at::native::crow_indices_copy(self);
20174}
20175} // anonymous namespace
20176namespace {
20177at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__col_indices_copy(const at::Tensor & self) {
20178 // No device check
20179 // DeviceGuard omitted
20180 return at::native::col_indices_copy(self);
20181}
20182} // anonymous namespace
20183namespace {
20184at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__ccol_indices_copy(const at::Tensor & self) {
20185 // No device check
20186 // DeviceGuard omitted
20187 return at::native::ccol_indices_copy(self);
20188}
20189} // anonymous namespace
20190namespace {
20191at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__row_indices_copy(const at::Tensor & self) {
20192 // No device check
20193 // DeviceGuard omitted
20194 return at::native::row_indices_copy(self);
20195}
20196} // anonymous namespace
20197namespace {
20198::std::vector<at::Tensor> wrapper_CompositeExplicitAutogradNonFunctional_int_unbind_copy(const at::Tensor & self, int64_t dim) {
20199 // No device check
20200 // DeviceGuard omitted
20201 return at::native::unbind_copy_int(self, dim);
20202}
20203} // anonymous namespace
20204namespace {
20205at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__view_copy(const at::Tensor & self, c10::SymIntArrayRef size) {
20206 // No device check
20207 // DeviceGuard omitted
20208 return at::native::view_copy_symint(self, size);
20209}
20210} // anonymous namespace
20211namespace {
20212at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_dtype_view_copy(const at::Tensor & self, at::ScalarType dtype) {
20213 // No device check
20214 // DeviceGuard omitted
20215 return at::native::view_copy_dtype(self, dtype);
20216}
20217} // anonymous namespace
20218namespace {
20219at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__unfold_copy(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
20220 // No device check
20221 // DeviceGuard omitted
20222 return at::native::unfold_copy(self, dimension, size, step);
20223}
20224} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeExplicitAutogradNonFunctional__alias_copy(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::alias_copy(self);
}
} // anonymous namespace
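// NOTE: The run of anonymous-namespace wrappers ending here registers the
// non-aliasing *_copy counterparts of the view ops (view_copy, permute_copy,
// slice_copy, unbind_copy, ...). Each returns a fresh tensor holding the same
// data the corresponding view would alias, forwarding to the at::native
// reference implementation; functionalization is expected to use these in
// place of the aliasing originals for this dispatch key.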
20232struct structured_special_airy_ai_default_backend_functional final : public at::meta::structured_special_airy_ai {
20233 void set_output_strided(
20234 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20235 TensorOptions options, DimnameList names
20236 ) override {
20237 auto current_device = guard_.current_device();
20238 if (C10_UNLIKELY(current_device.has_value())) {
20239 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20240 "structured kernels don't support multi-device outputs");
20241 } else {
20242 guard_.reset_device(options.device());
20243 }
20244 outputs_[output_idx] = create_out(sizes, strides, options);
20245 if (!names.empty()) {
20246 namedinference::propagate_names(*outputs_[output_idx], names);
20247 }
20248 // super must happen after, so that downstream can use maybe_get_output
20249 // to retrieve the output
20250 at::meta::structured_special_airy_ai::set_output_raw_strided(output_idx, sizes, strides, options, names);
20251 }
20252 void set_output_raw_strided(
20253 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20254 TensorOptions options, DimnameList names
20255 ) override {
20256 auto current_device = guard_.current_device();
20257 if (C10_UNLIKELY(current_device.has_value())) {
20258 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20259 "structured kernels don't support multi-device outputs");
20260 } else {
20261 guard_.reset_device(options.device());
20262 }
20263 outputs_[output_idx] = create_out(sizes, strides, options);
20264 if (!names.empty()) {
20265 namedinference::propagate_names(*outputs_[output_idx], names);
20266 }
20267 // super must happen after, so that downstream can use maybe_get_output
20268 // to retrieve the output
20269 at::meta::structured_special_airy_ai::set_output_raw_strided(output_idx, sizes, strides, options, names);
20270 }
20271 const Tensor& maybe_get_output(int64_t output_idx) override {
20272 return *outputs_[output_idx];
20273 }
20274 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20275 c10::OptionalDeviceGuard guard_;
20276};
20277at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_airy_ai(const at::Tensor & x) {
20278structured_special_airy_ai_default_backend_functional op;
20279op.meta(x);
20280at::special_airy_ai_outf(x, *op.outputs_[0]);
20281return std::move(op.outputs_[0]).take();
20282}
20283struct structured_special_bessel_j0_default_backend_functional final : public at::meta::structured_special_bessel_j0 {
20284 void set_output_strided(
20285 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20286 TensorOptions options, DimnameList names
20287 ) override {
20288 auto current_device = guard_.current_device();
20289 if (C10_UNLIKELY(current_device.has_value())) {
20290 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20291 "structured kernels don't support multi-device outputs");
20292 } else {
20293 guard_.reset_device(options.device());
20294 }
20295 outputs_[output_idx] = create_out(sizes, strides, options);
20296 if (!names.empty()) {
20297 namedinference::propagate_names(*outputs_[output_idx], names);
20298 }
20299 // super must happen after, so that downstream can use maybe_get_output
20300 // to retrieve the output
20301 at::meta::structured_special_bessel_j0::set_output_raw_strided(output_idx, sizes, strides, options, names);
20302 }
20303 void set_output_raw_strided(
20304 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20305 TensorOptions options, DimnameList names
20306 ) override {
20307 auto current_device = guard_.current_device();
20308 if (C10_UNLIKELY(current_device.has_value())) {
20309 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20310 "structured kernels don't support multi-device outputs");
20311 } else {
20312 guard_.reset_device(options.device());
20313 }
20314 outputs_[output_idx] = create_out(sizes, strides, options);
20315 if (!names.empty()) {
20316 namedinference::propagate_names(*outputs_[output_idx], names);
20317 }
20318 // super must happen after, so that downstream can use maybe_get_output
20319 // to retrieve the output
20320 at::meta::structured_special_bessel_j0::set_output_raw_strided(output_idx, sizes, strides, options, names);
20321 }
20322 const Tensor& maybe_get_output(int64_t output_idx) override {
20323 return *outputs_[output_idx];
20324 }
20325 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20326 c10::OptionalDeviceGuard guard_;
20327};
20328at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_j0(const at::Tensor & self) {
20329structured_special_bessel_j0_default_backend_functional op;
20330op.meta(self);
20331at::special_bessel_j0_outf(self, *op.outputs_[0]);
20332return std::move(op.outputs_[0]).take();
20333}
20334struct structured_special_bessel_j1_default_backend_functional final : public at::meta::structured_special_bessel_j1 {
20335 void set_output_strided(
20336 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20337 TensorOptions options, DimnameList names
20338 ) override {
20339 auto current_device = guard_.current_device();
20340 if (C10_UNLIKELY(current_device.has_value())) {
20341 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20342 "structured kernels don't support multi-device outputs");
20343 } else {
20344 guard_.reset_device(options.device());
20345 }
20346 outputs_[output_idx] = create_out(sizes, strides, options);
20347 if (!names.empty()) {
20348 namedinference::propagate_names(*outputs_[output_idx], names);
20349 }
20350 // super must happen after, so that downstream can use maybe_get_output
20351 // to retrieve the output
20352 at::meta::structured_special_bessel_j1::set_output_raw_strided(output_idx, sizes, strides, options, names);
20353 }
20354 void set_output_raw_strided(
20355 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20356 TensorOptions options, DimnameList names
20357 ) override {
20358 auto current_device = guard_.current_device();
20359 if (C10_UNLIKELY(current_device.has_value())) {
20360 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20361 "structured kernels don't support multi-device outputs");
20362 } else {
20363 guard_.reset_device(options.device());
20364 }
20365 outputs_[output_idx] = create_out(sizes, strides, options);
20366 if (!names.empty()) {
20367 namedinference::propagate_names(*outputs_[output_idx], names);
20368 }
20369 // super must happen after, so that downstream can use maybe_get_output
20370 // to retrieve the output
20371 at::meta::structured_special_bessel_j1::set_output_raw_strided(output_idx, sizes, strides, options, names);
20372 }
20373 const Tensor& maybe_get_output(int64_t output_idx) override {
20374 return *outputs_[output_idx];
20375 }
20376 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20377 c10::OptionalDeviceGuard guard_;
20378};
20379at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_j1(const at::Tensor & self) {
20380structured_special_bessel_j1_default_backend_functional op;
20381op.meta(self);
20382at::special_bessel_j1_outf(self, *op.outputs_[0]);
20383return std::move(op.outputs_[0]).take();
20384}
20385struct structured_special_bessel_y0_default_backend_functional final : public at::meta::structured_special_bessel_y0 {
20386 void set_output_strided(
20387 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20388 TensorOptions options, DimnameList names
20389 ) override {
20390 auto current_device = guard_.current_device();
20391 if (C10_UNLIKELY(current_device.has_value())) {
20392 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20393 "structured kernels don't support multi-device outputs");
20394 } else {
20395 guard_.reset_device(options.device());
20396 }
20397 outputs_[output_idx] = create_out(sizes, strides, options);
20398 if (!names.empty()) {
20399 namedinference::propagate_names(*outputs_[output_idx], names);
20400 }
20401 // super must happen after, so that downstream can use maybe_get_output
20402 // to retrieve the output
20403 at::meta::structured_special_bessel_y0::set_output_raw_strided(output_idx, sizes, strides, options, names);
20404 }
20405 void set_output_raw_strided(
20406 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20407 TensorOptions options, DimnameList names
20408 ) override {
20409 auto current_device = guard_.current_device();
20410 if (C10_UNLIKELY(current_device.has_value())) {
20411 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20412 "structured kernels don't support multi-device outputs");
20413 } else {
20414 guard_.reset_device(options.device());
20415 }
20416 outputs_[output_idx] = create_out(sizes, strides, options);
20417 if (!names.empty()) {
20418 namedinference::propagate_names(*outputs_[output_idx], names);
20419 }
20420 // super must happen after, so that downstream can use maybe_get_output
20421 // to retrieve the output
20422 at::meta::structured_special_bessel_y0::set_output_raw_strided(output_idx, sizes, strides, options, names);
20423 }
20424 const Tensor& maybe_get_output(int64_t output_idx) override {
20425 return *outputs_[output_idx];
20426 }
20427 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20428 c10::OptionalDeviceGuard guard_;
20429};
20430at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_y0(const at::Tensor & self) {
20431structured_special_bessel_y0_default_backend_functional op;
20432op.meta(self);
20433at::special_bessel_y0_outf(self, *op.outputs_[0]);
20434return std::move(op.outputs_[0]).take();
20435}
20436struct structured_special_bessel_y1_default_backend_functional final : public at::meta::structured_special_bessel_y1 {
20437 void set_output_strided(
20438 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20439 TensorOptions options, DimnameList names
20440 ) override {
20441 auto current_device = guard_.current_device();
20442 if (C10_UNLIKELY(current_device.has_value())) {
20443 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20444 "structured kernels don't support multi-device outputs");
20445 } else {
20446 guard_.reset_device(options.device());
20447 }
20448 outputs_[output_idx] = create_out(sizes, strides, options);
20449 if (!names.empty()) {
20450 namedinference::propagate_names(*outputs_[output_idx], names);
20451 }
20452 // super must happen after, so that downstream can use maybe_get_output
20453 // to retrieve the output
20454 at::meta::structured_special_bessel_y1::set_output_raw_strided(output_idx, sizes, strides, options, names);
20455 }
20456 void set_output_raw_strided(
20457 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20458 TensorOptions options, DimnameList names
20459 ) override {
20460 auto current_device = guard_.current_device();
20461 if (C10_UNLIKELY(current_device.has_value())) {
20462 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20463 "structured kernels don't support multi-device outputs");
20464 } else {
20465 guard_.reset_device(options.device());
20466 }
20467 outputs_[output_idx] = create_out(sizes, strides, options);
20468 if (!names.empty()) {
20469 namedinference::propagate_names(*outputs_[output_idx], names);
20470 }
20471 // super must happen after, so that downstream can use maybe_get_output
20472 // to retrieve the output
20473 at::meta::structured_special_bessel_y1::set_output_raw_strided(output_idx, sizes, strides, options, names);
20474 }
20475 const Tensor& maybe_get_output(int64_t output_idx) override {
20476 return *outputs_[output_idx];
20477 }
20478 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20479 c10::OptionalDeviceGuard guard_;
20480};
20481at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_y1(const at::Tensor & self) {
20482structured_special_bessel_y1_default_backend_functional op;
20483op.meta(self);
20484at::special_bessel_y1_outf(self, *op.outputs_[0]);
20485return std::move(op.outputs_[0]).take();
20486}
20487struct structured_special_chebyshev_polynomial_t_default_backend_functional final : public at::meta::structured_special_chebyshev_polynomial_t {
20488 void set_output_strided(
20489 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20490 TensorOptions options, DimnameList names
20491 ) override {
20492 auto current_device = guard_.current_device();
20493 if (C10_UNLIKELY(current_device.has_value())) {
20494 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20495 "structured kernels don't support multi-device outputs");
20496 } else {
20497 guard_.reset_device(options.device());
20498 }
20499 outputs_[output_idx] = create_out(sizes, strides, options);
20500 if (!names.empty()) {
20501 namedinference::propagate_names(*outputs_[output_idx], names);
20502 }
20503 // super must happen after, so that downstream can use maybe_get_output
20504 // to retrieve the output
20505 at::meta::structured_special_chebyshev_polynomial_t::set_output_raw_strided(output_idx, sizes, strides, options, names);
20506 }
20507 void set_output_raw_strided(
20508 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20509 TensorOptions options, DimnameList names
20510 ) override {
20511 auto current_device = guard_.current_device();
20512 if (C10_UNLIKELY(current_device.has_value())) {
20513 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20514 "structured kernels don't support multi-device outputs");
20515 } else {
20516 guard_.reset_device(options.device());
20517 }
20518 outputs_[output_idx] = create_out(sizes, strides, options);
20519 if (!names.empty()) {
20520 namedinference::propagate_names(*outputs_[output_idx], names);
20521 }
20522 // super must happen after, so that downstream can use maybe_get_output
20523 // to retrieve the output
20524 at::meta::structured_special_chebyshev_polynomial_t::set_output_raw_strided(output_idx, sizes, strides, options, names);
20525 }
20526 const Tensor& maybe_get_output(int64_t output_idx) override {
20527 return *outputs_[output_idx];
20528 }
20529 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20530 c10::OptionalDeviceGuard guard_;
20531};
20532at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
20533structured_special_chebyshev_polynomial_t_default_backend_functional op;
20534op.meta(x, n);
20535at::special_chebyshev_polynomial_t_outf(x, n, *op.outputs_[0]);
20536return std::move(op.outputs_[0]).take();
20537}
20538struct structured_special_chebyshev_polynomial_u_default_backend_functional final : public at::meta::structured_special_chebyshev_polynomial_u {
20539 void set_output_strided(
20540 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20541 TensorOptions options, DimnameList names
20542 ) override {
20543 auto current_device = guard_.current_device();
20544 if (C10_UNLIKELY(current_device.has_value())) {
20545 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20546 "structured kernels don't support multi-device outputs");
20547 } else {
20548 guard_.reset_device(options.device());
20549 }
20550 outputs_[output_idx] = create_out(sizes, strides, options);
20551 if (!names.empty()) {
20552 namedinference::propagate_names(*outputs_[output_idx], names);
20553 }
20554 // super must happen after, so that downstream can use maybe_get_output
20555 // to retrieve the output
20556 at::meta::structured_special_chebyshev_polynomial_u::set_output_raw_strided(output_idx, sizes, strides, options, names);
20557 }
20558 void set_output_raw_strided(
20559 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20560 TensorOptions options, DimnameList names
20561 ) override {
20562 auto current_device = guard_.current_device();
20563 if (C10_UNLIKELY(current_device.has_value())) {
20564 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20565 "structured kernels don't support multi-device outputs");
20566 } else {
20567 guard_.reset_device(options.device());
20568 }
20569 outputs_[output_idx] = create_out(sizes, strides, options);
20570 if (!names.empty()) {
20571 namedinference::propagate_names(*outputs_[output_idx], names);
20572 }
20573 // super must happen after, so that downstream can use maybe_get_output
20574 // to retrieve the output
20575 at::meta::structured_special_chebyshev_polynomial_u::set_output_raw_strided(output_idx, sizes, strides, options, names);
20576 }
20577 const Tensor& maybe_get_output(int64_t output_idx) override {
20578 return *outputs_[output_idx];
20579 }
20580 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20581 c10::OptionalDeviceGuard guard_;
20582};
20583at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
20584structured_special_chebyshev_polynomial_u_default_backend_functional op;
20585op.meta(x, n);
20586at::special_chebyshev_polynomial_u_outf(x, n, *op.outputs_[0]);
20587return std::move(op.outputs_[0]).take();
20588}
20589struct structured_special_chebyshev_polynomial_v_default_backend_functional final : public at::meta::structured_special_chebyshev_polynomial_v {
20590 void set_output_strided(
20591 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20592 TensorOptions options, DimnameList names
20593 ) override {
20594 auto current_device = guard_.current_device();
20595 if (C10_UNLIKELY(current_device.has_value())) {
20596 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20597 "structured kernels don't support multi-device outputs");
20598 } else {
20599 guard_.reset_device(options.device());
20600 }
20601 outputs_[output_idx] = create_out(sizes, strides, options);
20602 if (!names.empty()) {
20603 namedinference::propagate_names(*outputs_[output_idx], names);
20604 }
20605 // super must happen after, so that downstream can use maybe_get_output
20606 // to retrieve the output
20607 at::meta::structured_special_chebyshev_polynomial_v::set_output_raw_strided(output_idx, sizes, strides, options, names);
20608 }
20609 void set_output_raw_strided(
20610 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20611 TensorOptions options, DimnameList names
20612 ) override {
20613 auto current_device = guard_.current_device();
20614 if (C10_UNLIKELY(current_device.has_value())) {
20615 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20616 "structured kernels don't support multi-device outputs");
20617 } else {
20618 guard_.reset_device(options.device());
20619 }
20620 outputs_[output_idx] = create_out(sizes, strides, options);
20621 if (!names.empty()) {
20622 namedinference::propagate_names(*outputs_[output_idx], names);
20623 }
20624 // super must happen after, so that downstream can use maybe_get_output
20625 // to retrieve the output
20626 at::meta::structured_special_chebyshev_polynomial_v::set_output_raw_strided(output_idx, sizes, strides, options, names);
20627 }
20628 const Tensor& maybe_get_output(int64_t output_idx) override {
20629 return *outputs_[output_idx];
20630 }
20631 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20632 c10::OptionalDeviceGuard guard_;
20633};
20634at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
20635structured_special_chebyshev_polynomial_v_default_backend_functional op;
20636op.meta(x, n);
20637at::special_chebyshev_polynomial_v_outf(x, n, *op.outputs_[0]);
20638return std::move(op.outputs_[0]).take();
20639}
20640struct structured_special_chebyshev_polynomial_w_default_backend_functional final : public at::meta::structured_special_chebyshev_polynomial_w {
20641 void set_output_strided(
20642 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20643 TensorOptions options, DimnameList names
20644 ) override {
20645 auto current_device = guard_.current_device();
20646 if (C10_UNLIKELY(current_device.has_value())) {
20647 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20648 "structured kernels don't support multi-device outputs");
20649 } else {
20650 guard_.reset_device(options.device());
20651 }
20652 outputs_[output_idx] = create_out(sizes, strides, options);
20653 if (!names.empty()) {
20654 namedinference::propagate_names(*outputs_[output_idx], names);
20655 }
20656 // super must happen after, so that downstream can use maybe_get_output
20657 // to retrieve the output
20658 at::meta::structured_special_chebyshev_polynomial_w::set_output_raw_strided(output_idx, sizes, strides, options, names);
20659 }
20660 void set_output_raw_strided(
20661 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20662 TensorOptions options, DimnameList names
20663 ) override {
20664 auto current_device = guard_.current_device();
20665 if (C10_UNLIKELY(current_device.has_value())) {
20666 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20667 "structured kernels don't support multi-device outputs");
20668 } else {
20669 guard_.reset_device(options.device());
20670 }
20671 outputs_[output_idx] = create_out(sizes, strides, options);
20672 if (!names.empty()) {
20673 namedinference::propagate_names(*outputs_[output_idx], names);
20674 }
20675 // super must happen after, so that downstream can use maybe_get_output
20676 // to retrieve the output
20677 at::meta::structured_special_chebyshev_polynomial_w::set_output_raw_strided(output_idx, sizes, strides, options, names);
20678 }
20679 const Tensor& maybe_get_output(int64_t output_idx) override {
20680 return *outputs_[output_idx];
20681 }
20682 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20683 c10::OptionalDeviceGuard guard_;
20684};
20685at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
20686structured_special_chebyshev_polynomial_w_default_backend_functional op;
20687op.meta(x, n);
20688at::special_chebyshev_polynomial_w_outf(x, n, *op.outputs_[0]);
20689return std::move(op.outputs_[0]).take();
20690}
20691struct structured_special_hermite_polynomial_h_default_backend_functional final : public at::meta::structured_special_hermite_polynomial_h {
20692 void set_output_strided(
20693 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20694 TensorOptions options, DimnameList names
20695 ) override {
20696 auto current_device = guard_.current_device();
20697 if (C10_UNLIKELY(current_device.has_value())) {
20698 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20699 "structured kernels don't support multi-device outputs");
20700 } else {
20701 guard_.reset_device(options.device());
20702 }
20703 outputs_[output_idx] = create_out(sizes, strides, options);
20704 if (!names.empty()) {
20705 namedinference::propagate_names(*outputs_[output_idx], names);
20706 }
20707 // super must happen after, so that downstream can use maybe_get_output
20708 // to retrieve the output
20709 at::meta::structured_special_hermite_polynomial_h::set_output_raw_strided(output_idx, sizes, strides, options, names);
20710 }
20711 void set_output_raw_strided(
20712 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20713 TensorOptions options, DimnameList names
20714 ) override {
20715 auto current_device = guard_.current_device();
20716 if (C10_UNLIKELY(current_device.has_value())) {
20717 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20718 "structured kernels don't support multi-device outputs");
20719 } else {
20720 guard_.reset_device(options.device());
20721 }
20722 outputs_[output_idx] = create_out(sizes, strides, options);
20723 if (!names.empty()) {
20724 namedinference::propagate_names(*outputs_[output_idx], names);
20725 }
20726 // super must happen after, so that downstream can use maybe_get_output
20727 // to retrieve the output
20728 at::meta::structured_special_hermite_polynomial_h::set_output_raw_strided(output_idx, sizes, strides, options, names);
20729 }
20730 const Tensor& maybe_get_output(int64_t output_idx) override {
20731 return *outputs_[output_idx];
20732 }
20733 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20734 c10::OptionalDeviceGuard guard_;
20735};
20736at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n) {
20737structured_special_hermite_polynomial_h_default_backend_functional op;
20738op.meta(x, n);
20739at::special_hermite_polynomial_h_outf(x, n, *op.outputs_[0]);
20740return std::move(op.outputs_[0]).take();
20741}
20742struct structured_special_hermite_polynomial_he_default_backend_functional final : public at::meta::structured_special_hermite_polynomial_he {
20743 void set_output_strided(
20744 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20745 TensorOptions options, DimnameList names
20746 ) override {
20747 auto current_device = guard_.current_device();
20748 if (C10_UNLIKELY(current_device.has_value())) {
20749 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20750 "structured kernels don't support multi-device outputs");
20751 } else {
20752 guard_.reset_device(options.device());
20753 }
20754 outputs_[output_idx] = create_out(sizes, strides, options);
20755 if (!names.empty()) {
20756 namedinference::propagate_names(*outputs_[output_idx], names);
20757 }
20758 // super must happen after, so that downstream can use maybe_get_output
20759 // to retrieve the output
20760 at::meta::structured_special_hermite_polynomial_he::set_output_raw_strided(output_idx, sizes, strides, options, names);
20761 }
20762 void set_output_raw_strided(
20763 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20764 TensorOptions options, DimnameList names
20765 ) override {
20766 auto current_device = guard_.current_device();
20767 if (C10_UNLIKELY(current_device.has_value())) {
20768 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20769 "structured kernels don't support multi-device outputs");
20770 } else {
20771 guard_.reset_device(options.device());
20772 }
20773 outputs_[output_idx] = create_out(sizes, strides, options);
20774 if (!names.empty()) {
20775 namedinference::propagate_names(*outputs_[output_idx], names);
20776 }
20777 // super must happen after, so that downstream can use maybe_get_output
20778 // to retrieve the output
20779 at::meta::structured_special_hermite_polynomial_he::set_output_raw_strided(output_idx, sizes, strides, options, names);
20780 }
20781 const Tensor& maybe_get_output(int64_t output_idx) override {
20782 return *outputs_[output_idx];
20783 }
20784 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20785 c10::OptionalDeviceGuard guard_;
20786};
20787at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n) {
20788structured_special_hermite_polynomial_he_default_backend_functional op;
20789op.meta(x, n);
20790at::special_hermite_polynomial_he_outf(x, n, *op.outputs_[0]);
20791return std::move(op.outputs_[0]).take();
20792}
20793struct structured_special_laguerre_polynomial_l_default_backend_functional final : public at::meta::structured_special_laguerre_polynomial_l {
20794 void set_output_strided(
20795 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20796 TensorOptions options, DimnameList names
20797 ) override {
20798 auto current_device = guard_.current_device();
20799 if (C10_UNLIKELY(current_device.has_value())) {
20800 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20801 "structured kernels don't support multi-device outputs");
20802 } else {
20803 guard_.reset_device(options.device());
20804 }
20805 outputs_[output_idx] = create_out(sizes, strides, options);
20806 if (!names.empty()) {
20807 namedinference::propagate_names(*outputs_[output_idx], names);
20808 }
20809 // super must happen after, so that downstream can use maybe_get_output
20810 // to retrieve the output
20811 at::meta::structured_special_laguerre_polynomial_l::set_output_raw_strided(output_idx, sizes, strides, options, names);
20812 }
20813 void set_output_raw_strided(
20814 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20815 TensorOptions options, DimnameList names
20816 ) override {
20817 auto current_device = guard_.current_device();
20818 if (C10_UNLIKELY(current_device.has_value())) {
20819 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20820 "structured kernels don't support multi-device outputs");
20821 } else {
20822 guard_.reset_device(options.device());
20823 }
20824 outputs_[output_idx] = create_out(sizes, strides, options);
20825 if (!names.empty()) {
20826 namedinference::propagate_names(*outputs_[output_idx], names);
20827 }
20828 // super must happen after, so that downstream can use maybe_get_output
20829 // to retrieve the output
20830 at::meta::structured_special_laguerre_polynomial_l::set_output_raw_strided(output_idx, sizes, strides, options, names);
20831 }
20832 const Tensor& maybe_get_output(int64_t output_idx) override {
20833 return *outputs_[output_idx];
20834 }
20835 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20836 c10::OptionalDeviceGuard guard_;
20837};
20838at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n) {
20839structured_special_laguerre_polynomial_l_default_backend_functional op;
20840op.meta(x, n);
20841at::special_laguerre_polynomial_l_outf(x, n, *op.outputs_[0]);
20842return std::move(op.outputs_[0]).take();
20843}
20844struct structured_special_legendre_polynomial_p_default_backend_functional final : public at::meta::structured_special_legendre_polynomial_p {
20845 void set_output_strided(
20846 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20847 TensorOptions options, DimnameList names
20848 ) override {
20849 auto current_device = guard_.current_device();
20850 if (C10_UNLIKELY(current_device.has_value())) {
20851 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20852 "structured kernels don't support multi-device outputs");
20853 } else {
20854 guard_.reset_device(options.device());
20855 }
20856 outputs_[output_idx] = create_out(sizes, strides, options);
20857 if (!names.empty()) {
20858 namedinference::propagate_names(*outputs_[output_idx], names);
20859 }
20860 // super must happen after, so that downstream can use maybe_get_output
20861 // to retrieve the output
20862 at::meta::structured_special_legendre_polynomial_p::set_output_raw_strided(output_idx, sizes, strides, options, names);
20863 }
20864 void set_output_raw_strided(
20865 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20866 TensorOptions options, DimnameList names
20867 ) override {
20868 auto current_device = guard_.current_device();
20869 if (C10_UNLIKELY(current_device.has_value())) {
20870 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20871 "structured kernels don't support multi-device outputs");
20872 } else {
20873 guard_.reset_device(options.device());
20874 }
20875 outputs_[output_idx] = create_out(sizes, strides, options);
20876 if (!names.empty()) {
20877 namedinference::propagate_names(*outputs_[output_idx], names);
20878 }
20879 // super must happen after, so that downstream can use maybe_get_output
20880 // to retrieve the output
20881 at::meta::structured_special_legendre_polynomial_p::set_output_raw_strided(output_idx, sizes, strides, options, names);
20882 }
20883 const Tensor& maybe_get_output(int64_t output_idx) override {
20884 return *outputs_[output_idx];
20885 }
20886 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20887 c10::OptionalDeviceGuard guard_;
20888};
20889at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n) {
20890structured_special_legendre_polynomial_p_default_backend_functional op;
20891op.meta(x, n);
20892at::special_legendre_polynomial_p_outf(x, n, *op.outputs_[0]);
20893return std::move(op.outputs_[0]).take();
20894}
20895struct structured_special_modified_bessel_i0_default_backend_functional final : public at::meta::structured_special_modified_bessel_i0 {
20896 void set_output_strided(
20897 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20898 TensorOptions options, DimnameList names
20899 ) override {
20900 auto current_device = guard_.current_device();
20901 if (C10_UNLIKELY(current_device.has_value())) {
20902 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20903 "structured kernels don't support multi-device outputs");
20904 } else {
20905 guard_.reset_device(options.device());
20906 }
20907 outputs_[output_idx] = create_out(sizes, strides, options);
20908 if (!names.empty()) {
20909 namedinference::propagate_names(*outputs_[output_idx], names);
20910 }
20911 // super must happen after, so that downstream can use maybe_get_output
20912 // to retrieve the output
20913 at::meta::structured_special_modified_bessel_i0::set_output_raw_strided(output_idx, sizes, strides, options, names);
20914 }
20915 void set_output_raw_strided(
20916 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20917 TensorOptions options, DimnameList names
20918 ) override {
20919 auto current_device = guard_.current_device();
20920 if (C10_UNLIKELY(current_device.has_value())) {
20921 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20922 "structured kernels don't support multi-device outputs");
20923 } else {
20924 guard_.reset_device(options.device());
20925 }
20926 outputs_[output_idx] = create_out(sizes, strides, options);
20927 if (!names.empty()) {
20928 namedinference::propagate_names(*outputs_[output_idx], names);
20929 }
20930 // super must happen after, so that downstream can use maybe_get_output
20931 // to retrieve the output
20932 at::meta::structured_special_modified_bessel_i0::set_output_raw_strided(output_idx, sizes, strides, options, names);
20933 }
20934 const Tensor& maybe_get_output(int64_t output_idx) override {
20935 return *outputs_[output_idx];
20936 }
20937 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20938 c10::OptionalDeviceGuard guard_;
20939};
20940at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_i0(const at::Tensor & self) {
20941structured_special_modified_bessel_i0_default_backend_functional op;
20942op.meta(self);
20943at::special_modified_bessel_i0_outf(self, *op.outputs_[0]);
20944return std::move(op.outputs_[0]).take();
20945}
20946struct structured_special_modified_bessel_i1_default_backend_functional final : public at::meta::structured_special_modified_bessel_i1 {
20947 void set_output_strided(
20948 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20949 TensorOptions options, DimnameList names
20950 ) override {
20951 auto current_device = guard_.current_device();
20952 if (C10_UNLIKELY(current_device.has_value())) {
20953 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20954 "structured kernels don't support multi-device outputs");
20955 } else {
20956 guard_.reset_device(options.device());
20957 }
20958 outputs_[output_idx] = create_out(sizes, strides, options);
20959 if (!names.empty()) {
20960 namedinference::propagate_names(*outputs_[output_idx], names);
20961 }
20962 // super must happen after, so that downstream can use maybe_get_output
20963 // to retrieve the output
20964 at::meta::structured_special_modified_bessel_i1::set_output_raw_strided(output_idx, sizes, strides, options, names);
20965 }
20966 void set_output_raw_strided(
20967 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20968 TensorOptions options, DimnameList names
20969 ) override {
20970 auto current_device = guard_.current_device();
20971 if (C10_UNLIKELY(current_device.has_value())) {
20972 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
20973 "structured kernels don't support multi-device outputs");
20974 } else {
20975 guard_.reset_device(options.device());
20976 }
20977 outputs_[output_idx] = create_out(sizes, strides, options);
20978 if (!names.empty()) {
20979 namedinference::propagate_names(*outputs_[output_idx], names);
20980 }
20981 // super must happen after, so that downstream can use maybe_get_output
20982 // to retrieve the output
20983 at::meta::structured_special_modified_bessel_i1::set_output_raw_strided(output_idx, sizes, strides, options, names);
20984 }
20985 const Tensor& maybe_get_output(int64_t output_idx) override {
20986 return *outputs_[output_idx];
20987 }
20988 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20989 c10::OptionalDeviceGuard guard_;
20990};
20991at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_i1(const at::Tensor & self) {
20992structured_special_modified_bessel_i1_default_backend_functional op;
20993op.meta(self);
20994at::special_modified_bessel_i1_outf(self, *op.outputs_[0]);
20995return std::move(op.outputs_[0]).take();
20996}
20997struct structured_special_modified_bessel_k0_default_backend_functional final : public at::meta::structured_special_modified_bessel_k0 {
20998 void set_output_strided(
20999 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21000 TensorOptions options, DimnameList names
21001 ) override {
21002 auto current_device = guard_.current_device();
21003 if (C10_UNLIKELY(current_device.has_value())) {
21004 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21005 "structured kernels don't support multi-device outputs");
21006 } else {
21007 guard_.reset_device(options.device());
21008 }
21009 outputs_[output_idx] = create_out(sizes, strides, options);
21010 if (!names.empty()) {
21011 namedinference::propagate_names(*outputs_[output_idx], names);
21012 }
21013 // super must happen after, so that downstream can use maybe_get_output
21014 // to retrieve the output
21015 at::meta::structured_special_modified_bessel_k0::set_output_raw_strided(output_idx, sizes, strides, options, names);
21016 }
21017 void set_output_raw_strided(
21018 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21019 TensorOptions options, DimnameList names
21020 ) override {
21021 auto current_device = guard_.current_device();
21022 if (C10_UNLIKELY(current_device.has_value())) {
21023 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21024 "structured kernels don't support multi-device outputs");
21025 } else {
21026 guard_.reset_device(options.device());
21027 }
21028 outputs_[output_idx] = create_out(sizes, strides, options);
21029 if (!names.empty()) {
21030 namedinference::propagate_names(*outputs_[output_idx], names);
21031 }
21032 // super must happen after, so that downstream can use maybe_get_output
21033 // to retrieve the output
21034 at::meta::structured_special_modified_bessel_k0::set_output_raw_strided(output_idx, sizes, strides, options, names);
21035 }
21036 const Tensor& maybe_get_output(int64_t output_idx) override {
21037 return *outputs_[output_idx];
21038 }
21039 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21040 c10::OptionalDeviceGuard guard_;
21041};
21042at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_k0(const at::Tensor & self) {
21043structured_special_modified_bessel_k0_default_backend_functional op;
21044op.meta(self);
21045at::special_modified_bessel_k0_outf(self, *op.outputs_[0]);
21046return std::move(op.outputs_[0]).take();
21047}
21048struct structured_special_modified_bessel_k1_default_backend_functional final : public at::meta::structured_special_modified_bessel_k1 {
21049 void set_output_strided(
21050 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21051 TensorOptions options, DimnameList names
21052 ) override {
21053 auto current_device = guard_.current_device();
21054 if (C10_UNLIKELY(current_device.has_value())) {
21055 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21056 "structured kernels don't support multi-device outputs");
21057 } else {
21058 guard_.reset_device(options.device());
21059 }
21060 outputs_[output_idx] = create_out(sizes, strides, options);
21061 if (!names.empty()) {
21062 namedinference::propagate_names(*outputs_[output_idx], names);
21063 }
21064 // super must happen after, so that downstream can use maybe_get_output
21065 // to retrieve the output
21066 at::meta::structured_special_modified_bessel_k1::set_output_raw_strided(output_idx, sizes, strides, options, names);
21067 }
21068 void set_output_raw_strided(
21069 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21070 TensorOptions options, DimnameList names
21071 ) override {
21072 auto current_device = guard_.current_device();
21073 if (C10_UNLIKELY(current_device.has_value())) {
21074 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21075 "structured kernels don't support multi-device outputs");
21076 } else {
21077 guard_.reset_device(options.device());
21078 }
21079 outputs_[output_idx] = create_out(sizes, strides, options);
21080 if (!names.empty()) {
21081 namedinference::propagate_names(*outputs_[output_idx], names);
21082 }
21083 // super must happen after, so that downstream can use maybe_get_output
21084 // to retrieve the output
21085 at::meta::structured_special_modified_bessel_k1::set_output_raw_strided(output_idx, sizes, strides, options, names);
21086 }
21087 const Tensor& maybe_get_output(int64_t output_idx) override {
21088 return *outputs_[output_idx];
21089 }
21090 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21091 c10::OptionalDeviceGuard guard_;
21092};
21093at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_k1(const at::Tensor & self) {
21094structured_special_modified_bessel_k1_default_backend_functional op;
21095op.meta(self);
21096at::special_modified_bessel_k1_outf(self, *op.outputs_[0]);
21097return std::move(op.outputs_[0]).take();
21098}
21099struct structured_special_scaled_modified_bessel_k0_default_backend_functional final : public at::meta::structured_special_scaled_modified_bessel_k0 {
21100 void set_output_strided(
21101 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21102 TensorOptions options, DimnameList names
21103 ) override {
21104 auto current_device = guard_.current_device();
21105 if (C10_UNLIKELY(current_device.has_value())) {
21106 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21107 "structured kernels don't support multi-device outputs");
21108 } else {
21109 guard_.reset_device(options.device());
21110 }
21111 outputs_[output_idx] = create_out(sizes, strides, options);
21112 if (!names.empty()) {
21113 namedinference::propagate_names(*outputs_[output_idx], names);
21114 }
21115 // super must happen after, so that downstream can use maybe_get_output
21116 // to retrieve the output
21117 at::meta::structured_special_scaled_modified_bessel_k0::set_output_raw_strided(output_idx, sizes, strides, options, names);
21118 }
21119 void set_output_raw_strided(
21120 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21121 TensorOptions options, DimnameList names
21122 ) override {
21123 auto current_device = guard_.current_device();
21124 if (C10_UNLIKELY(current_device.has_value())) {
21125 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21126 "structured kernels don't support multi-device outputs");
21127 } else {
21128 guard_.reset_device(options.device());
21129 }
21130 outputs_[output_idx] = create_out(sizes, strides, options);
21131 if (!names.empty()) {
21132 namedinference::propagate_names(*outputs_[output_idx], names);
21133 }
21134 // super must happen after, so that downstream can use maybe_get_output
21135 // to retrieve the output
21136 at::meta::structured_special_scaled_modified_bessel_k0::set_output_raw_strided(output_idx, sizes, strides, options, names);
21137 }
21138 const Tensor& maybe_get_output(int64_t output_idx) override {
21139 return *outputs_[output_idx];
21140 }
21141 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21142 c10::OptionalDeviceGuard guard_;
21143};
21144at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_scaled_modified_bessel_k0(const at::Tensor & x) {
21145structured_special_scaled_modified_bessel_k0_default_backend_functional op;
21146op.meta(x);
21147at::special_scaled_modified_bessel_k0_outf(x, *op.outputs_[0]);
21148return std::move(op.outputs_[0]).take();
21149}
21150struct structured_special_scaled_modified_bessel_k1_default_backend_functional final : public at::meta::structured_special_scaled_modified_bessel_k1 {
21151 void set_output_strided(
21152 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21153 TensorOptions options, DimnameList names
21154 ) override {
21155 auto current_device = guard_.current_device();
21156 if (C10_UNLIKELY(current_device.has_value())) {
21157 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21158 "structured kernels don't support multi-device outputs");
21159 } else {
21160 guard_.reset_device(options.device());
21161 }
21162 outputs_[output_idx] = create_out(sizes, strides, options);
21163 if (!names.empty()) {
21164 namedinference::propagate_names(*outputs_[output_idx], names);
21165 }
21166 // super must happen after, so that downstream can use maybe_get_output
21167 // to retrieve the output
21168 at::meta::structured_special_scaled_modified_bessel_k1::set_output_raw_strided(output_idx, sizes, strides, options, names);
21169 }
21170 void set_output_raw_strided(
21171 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21172 TensorOptions options, DimnameList names
21173 ) override {
21174 auto current_device = guard_.current_device();
21175 if (C10_UNLIKELY(current_device.has_value())) {
21176 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21177 "structured kernels don't support multi-device outputs");
21178 } else {
21179 guard_.reset_device(options.device());
21180 }
21181 outputs_[output_idx] = create_out(sizes, strides, options);
21182 if (!names.empty()) {
21183 namedinference::propagate_names(*outputs_[output_idx], names);
21184 }
21185 // super must happen after, so that downstream can use maybe_get_output
21186 // to retrieve the output
21187 at::meta::structured_special_scaled_modified_bessel_k1::set_output_raw_strided(output_idx, sizes, strides, options, names);
21188 }
21189 const Tensor& maybe_get_output(int64_t output_idx) override {
21190 return *outputs_[output_idx];
21191 }
21192 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21193 c10::OptionalDeviceGuard guard_;
21194};
21195at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_scaled_modified_bessel_k1(const at::Tensor & x) {
21196structured_special_scaled_modified_bessel_k1_default_backend_functional op;
21197op.meta(x);
21198at::special_scaled_modified_bessel_k1_outf(x, *op.outputs_[0]);
21199return std::move(op.outputs_[0]).take();
21200}
21201struct structured_special_shifted_chebyshev_polynomial_t_default_backend_functional final : public at::meta::structured_special_shifted_chebyshev_polynomial_t {
21202 void set_output_strided(
21203 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21204 TensorOptions options, DimnameList names
21205 ) override {
21206 auto current_device = guard_.current_device();
21207 if (C10_UNLIKELY(current_device.has_value())) {
21208 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21209 "structured kernels don't support multi-device outputs");
21210 } else {
21211 guard_.reset_device(options.device());
21212 }
21213 outputs_[output_idx] = create_out(sizes, strides, options);
21214 if (!names.empty()) {
21215 namedinference::propagate_names(*outputs_[output_idx], names);
21216 }
21217 // super must happen after, so that downstream can use maybe_get_output
21218 // to retrieve the output
21219 at::meta::structured_special_shifted_chebyshev_polynomial_t::set_output_raw_strided(output_idx, sizes, strides, options, names);
21220 }
21221 void set_output_raw_strided(
21222 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21223 TensorOptions options, DimnameList names
21224 ) override {
21225 auto current_device = guard_.current_device();
21226 if (C10_UNLIKELY(current_device.has_value())) {
21227 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21228 "structured kernels don't support multi-device outputs");
21229 } else {
21230 guard_.reset_device(options.device());
21231 }
21232 outputs_[output_idx] = create_out(sizes, strides, options);
21233 if (!names.empty()) {
21234 namedinference::propagate_names(*outputs_[output_idx], names);
21235 }
21236 // super must happen after, so that downstream can use maybe_get_output
21237 // to retrieve the output
21238 at::meta::structured_special_shifted_chebyshev_polynomial_t::set_output_raw_strided(output_idx, sizes, strides, options, names);
21239 }
21240 const Tensor& maybe_get_output(int64_t output_idx) override {
21241 return *outputs_[output_idx];
21242 }
21243 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21244 c10::OptionalDeviceGuard guard_;
21245};
21246at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
21247structured_special_shifted_chebyshev_polynomial_t_default_backend_functional op;
21248op.meta(x, n);
21249at::special_shifted_chebyshev_polynomial_t_outf(x, n, *op.outputs_[0]);
21250return std::move(op.outputs_[0]).take();
21251}
21252struct structured_special_shifted_chebyshev_polynomial_u_default_backend_functional final : public at::meta::structured_special_shifted_chebyshev_polynomial_u {
21253 void set_output_strided(
21254 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21255 TensorOptions options, DimnameList names
21256 ) override {
21257 auto current_device = guard_.current_device();
21258 if (C10_UNLIKELY(current_device.has_value())) {
21259 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21260 "structured kernels don't support multi-device outputs");
21261 } else {
21262 guard_.reset_device(options.device());
21263 }
21264 outputs_[output_idx] = create_out(sizes, strides, options);
21265 if (!names.empty()) {
21266 namedinference::propagate_names(*outputs_[output_idx], names);
21267 }
21268 // super must happen after, so that downstream can use maybe_get_output
21269 // to retrieve the output
21270 at::meta::structured_special_shifted_chebyshev_polynomial_u::set_output_raw_strided(output_idx, sizes, strides, options, names);
21271 }
21272 void set_output_raw_strided(
21273 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21274 TensorOptions options, DimnameList names
21275 ) override {
21276 auto current_device = guard_.current_device();
21277 if (C10_UNLIKELY(current_device.has_value())) {
21278 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21279 "structured kernels don't support multi-device outputs");
21280 } else {
21281 guard_.reset_device(options.device());
21282 }
21283 outputs_[output_idx] = create_out(sizes, strides, options);
21284 if (!names.empty()) {
21285 namedinference::propagate_names(*outputs_[output_idx], names);
21286 }
21287 // super must happen after, so that downstream can use maybe_get_output
21288 // to retrieve the output
21289 at::meta::structured_special_shifted_chebyshev_polynomial_u::set_output_raw_strided(output_idx, sizes, strides, options, names);
21290 }
21291 const Tensor& maybe_get_output(int64_t output_idx) override {
21292 return *outputs_[output_idx];
21293 }
21294 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21295 c10::OptionalDeviceGuard guard_;
21296};
21297at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
21298structured_special_shifted_chebyshev_polynomial_u_default_backend_functional op;
21299op.meta(x, n);
21300at::special_shifted_chebyshev_polynomial_u_outf(x, n, *op.outputs_[0]);
21301return std::move(op.outputs_[0]).take();
21302}
21303struct structured_special_shifted_chebyshev_polynomial_v_default_backend_functional final : public at::meta::structured_special_shifted_chebyshev_polynomial_v {
21304 void set_output_strided(
21305 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21306 TensorOptions options, DimnameList names
21307 ) override {
21308 auto current_device = guard_.current_device();
21309 if (C10_UNLIKELY(current_device.has_value())) {
21310 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21311 "structured kernels don't support multi-device outputs");
21312 } else {
21313 guard_.reset_device(options.device());
21314 }
21315 outputs_[output_idx] = create_out(sizes, strides, options);
21316 if (!names.empty()) {
21317 namedinference::propagate_names(*outputs_[output_idx], names);
21318 }
21319 // super must happen after, so that downstream can use maybe_get_output
21320 // to retrieve the output
21321 at::meta::structured_special_shifted_chebyshev_polynomial_v::set_output_raw_strided(output_idx, sizes, strides, options, names);
21322 }
21323 void set_output_raw_strided(
21324 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21325 TensorOptions options, DimnameList names
21326 ) override {
21327 auto current_device = guard_.current_device();
21328 if (C10_UNLIKELY(current_device.has_value())) {
21329 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21330 "structured kernels don't support multi-device outputs");
21331 } else {
21332 guard_.reset_device(options.device());
21333 }
21334 outputs_[output_idx] = create_out(sizes, strides, options);
21335 if (!names.empty()) {
21336 namedinference::propagate_names(*outputs_[output_idx], names);
21337 }
21338 // super must happen after, so that downstream can use maybe_get_output
21339 // to retrieve the output
21340 at::meta::structured_special_shifted_chebyshev_polynomial_v::set_output_raw_strided(output_idx, sizes, strides, options, names);
21341 }
21342 const Tensor& maybe_get_output(int64_t output_idx) override {
21343 return *outputs_[output_idx];
21344 }
21345 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21346 c10::OptionalDeviceGuard guard_;
21347};
21348at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
21349structured_special_shifted_chebyshev_polynomial_v_default_backend_functional op;
21350op.meta(x, n);
21351at::special_shifted_chebyshev_polynomial_v_outf(x, n, *op.outputs_[0]);
21352return std::move(op.outputs_[0]).take();
21353}
21354struct structured_special_shifted_chebyshev_polynomial_w_default_backend_functional final : public at::meta::structured_special_shifted_chebyshev_polynomial_w {
21355 void set_output_strided(
21356 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21357 TensorOptions options, DimnameList names
21358 ) override {
21359 auto current_device = guard_.current_device();
21360 if (C10_UNLIKELY(current_device.has_value())) {
21361 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21362 "structured kernels don't support multi-device outputs");
21363 } else {
21364 guard_.reset_device(options.device());
21365 }
21366 outputs_[output_idx] = create_out(sizes, strides, options);
21367 if (!names.empty()) {
21368 namedinference::propagate_names(*outputs_[output_idx], names);
21369 }
21370 // super must happen after, so that downstream can use maybe_get_output
21371 // to retrieve the output
21372 at::meta::structured_special_shifted_chebyshev_polynomial_w::set_output_raw_strided(output_idx, sizes, strides, options, names);
21373 }
21374 void set_output_raw_strided(
21375 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21376 TensorOptions options, DimnameList names
21377 ) override {
21378 auto current_device = guard_.current_device();
21379 if (C10_UNLIKELY(current_device.has_value())) {
21380 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21381 "structured kernels don't support multi-device outputs");
21382 } else {
21383 guard_.reset_device(options.device());
21384 }
21385 outputs_[output_idx] = create_out(sizes, strides, options);
21386 if (!names.empty()) {
21387 namedinference::propagate_names(*outputs_[output_idx], names);
21388 }
21389 // super must happen after, so that downstream can use maybe_get_output
21390 // to retrieve the output
21391 at::meta::structured_special_shifted_chebyshev_polynomial_w::set_output_raw_strided(output_idx, sizes, strides, options, names);
21392 }
21393 const Tensor& maybe_get_output(int64_t output_idx) override {
21394 return *outputs_[output_idx];
21395 }
21396 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21397 c10::OptionalDeviceGuard guard_;
21398};
21399at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
21400structured_special_shifted_chebyshev_polynomial_w_default_backend_functional op;
21401op.meta(x, n);
21402at::special_shifted_chebyshev_polynomial_w_outf(x, n, *op.outputs_[0]);
21403return std::move(op.outputs_[0]).take();
21404}
21405struct structured_special_spherical_bessel_j0_default_backend_functional final : public at::meta::structured_special_spherical_bessel_j0 {
21406 void set_output_strided(
21407 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21408 TensorOptions options, DimnameList names
21409 ) override {
21410 auto current_device = guard_.current_device();
21411 if (C10_UNLIKELY(current_device.has_value())) {
21412 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21413 "structured kernels don't support multi-device outputs");
21414 } else {
21415 guard_.reset_device(options.device());
21416 }
21417 outputs_[output_idx] = create_out(sizes, strides, options);
21418 if (!names.empty()) {
21419 namedinference::propagate_names(*outputs_[output_idx], names);
21420 }
21421 // super must happen after, so that downstream can use maybe_get_output
21422 // to retrieve the output
21423 at::meta::structured_special_spherical_bessel_j0::set_output_raw_strided(output_idx, sizes, strides, options, names);
21424 }
21425 void set_output_raw_strided(
21426 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21427 TensorOptions options, DimnameList names
21428 ) override {
21429 auto current_device = guard_.current_device();
21430 if (C10_UNLIKELY(current_device.has_value())) {
21431 TORCH_INTERNAL_ASSERT(*current_device == options.device(),
21432 "structured kernels don't support multi-device outputs");
21433 } else {
21434 guard_.reset_device(options.device());
21435 }
21436 outputs_[output_idx] = create_out(sizes, strides, options);
21437 if (!names.empty()) {
21438 namedinference::propagate_names(*outputs_[output_idx], names);
21439 }
21440 // super must happen after, so that downstream can use maybe_get_output
21441 // to retrieve the output
21442 at::meta::structured_special_spherical_bessel_j0::set_output_raw_strided(output_idx, sizes, strides, options, names);
21443 }
21444 const Tensor& maybe_get_output(int64_t output_idx) override {
21445 return *outputs_[output_idx];
21446 }
21447 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21448 c10::OptionalDeviceGuard guard_;
21449};
21450at::Tensor wrapper_CompositeExplicitAutogradNonFunctional_special_spherical_bessel_j0(const at::Tensor & x) {
21451structured_special_spherical_bessel_j0_default_backend_functional op;
21452op.meta(x);
21453at::special_spherical_bessel_j0_outf(x, *op.outputs_[0]);
21454return std::move(op.outputs_[0]).take();
21455}
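// Register the wrappers defined in this file for the aten namespace under the
// CompositeExplicitAutogradNonFunctional dispatch key. These registrations are
// lower precedence than backend-specific kernels, so a call such as
// at::special_bessel_j0(x) only reaches
// wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_j0 on backends
// covered by this alias key that do not register their own kernel for the op.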
TORCH_LIBRARY_IMPL(aten, CompositeExplicitAutogradNonFunctional, m) {
m.impl("sgn", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sgn));
m.impl("sgn_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sgn_));
m.impl("acos", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_acos));
m.impl("acos_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_acos_));
m.impl("add.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_add_Tensor));
m.impl("add_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_add__Tensor));
m.impl("addmv", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_addmv));
m.impl("addmv_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_addmv_));
m.impl("all.dim", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_all_dim));
m.impl("any.dim", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_any_dim));
m.impl("argmax", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_argmax));
m.impl("argmin", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_argmin));
m.impl("acosh", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_acosh));
m.impl("acosh_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_acosh_));
m.impl("asinh", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_asinh));
m.impl("asinh_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_asinh_));
m.impl("atanh", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_atanh));
m.impl("atanh_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_atanh_));
m.impl("as_strided_",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__as_strided_));
m.impl("asin", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_asin));
m.impl("asin_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_asin_));
m.impl("atan", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_atan));
m.impl("atan_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_atan_));
m.impl("baddbmm", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_baddbmm));
m.impl("baddbmm_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_baddbmm_));
m.impl("bernoulli.p",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_p_bernoulli));
m.impl("bitwise_not", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_not));
m.impl("bitwise_not_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_not_));
m.impl("copysign.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_copysign_Tensor));
m.impl("copysign_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_copysign__Tensor));
m.impl("bmm", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bmm));
m.impl("cat", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cat));
m.impl("ceil", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ceil));
m.impl("ceil_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ceil_));
m.impl("clamp", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp));
m.impl("clamp_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_));
m.impl("clamp.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_Tensor));
m.impl("clamp_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp__Tensor));
m.impl("clamp_max", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_max));
m.impl("clamp_max_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_max_));
m.impl("clamp_max.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_max_Tensor));
m.impl("clamp_max_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_max__Tensor));
m.impl("clamp_min", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_min));
m.impl("clamp_min_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_min_));
m.impl("clamp_min.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_min_Tensor));
m.impl("clamp_min_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_clamp_min__Tensor));
m.impl("copy",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__copy));
m.impl("cos", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cos));
m.impl("cos_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cos_));
m.impl("cosh", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cosh));
m.impl("cosh_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cosh_));
m.impl("cumprod", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cumprod));
m.impl("cumprod_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cumprod_));
m.impl("cumsum", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cumsum));
m.impl("cumsum_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_cumsum_));
m.impl("diag_embed",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__diag_embed));
m.impl("div.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_div_Tensor));
m.impl("div_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_div__Tensor));
m.impl("div.Tensor_mode", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_div_Tensor_mode));
m.impl("div_.Tensor_mode", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_div__Tensor_mode));
m.impl("new_empty_strided",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__new_empty_strided));
m.impl("erf", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_erf));
m.impl("erf_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_erf_));
m.impl("erfc", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_erfc));
m.impl("erfc_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_erfc_));
m.impl("exp", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_exp));
m.impl("exp_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_exp_));
m.impl("exp2", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_exp2));
m.impl("exp2_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_exp2_));
m.impl("expm1", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_expm1));
m.impl("expm1_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_expm1_));
m.impl("floor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_floor));
m.impl("floor_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_floor_));
m.impl("frac", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_frac));
m.impl("frac_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_frac_));
m.impl("gcd", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gcd));
m.impl("gcd_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gcd_));
m.impl("lcm", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lcm));
m.impl("lcm_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lcm_));
m.impl("index.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_index_Tensor));
m.impl("index_copy", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_index_copy));
m.impl("index_copy_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_index_copy_));
m.impl("isin.Tensor_Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_isin_Tensor_Tensor));
m.impl("isin.Tensor_Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_isin_Tensor_Scalar));
m.impl("isin.Scalar_Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_isin_Scalar_Tensor));
m.impl("log", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_log));
m.impl("log_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_log_));
m.impl("log10", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_log10));
m.impl("log10_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_log10_));
m.impl("log1p", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_log1p));
m.impl("log1p_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_log1p_));
m.impl("log2", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_log2));
m.impl("log2_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_log2_));
m.impl("logaddexp", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_logaddexp));
m.impl("logaddexp2", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_logaddexp2));
m.impl("xlogy.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_xlogy_Tensor));
m.impl("xlogy_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_xlogy__Tensor));
m.impl("_log_softmax", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__log_softmax));
m.impl("_log_softmax_backward_data", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__log_softmax_backward_data));
m.impl("logsumexp.out",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_out_logsumexp_out));
m.impl("aminmax", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_aminmax));
m.impl("max.dim", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_max_dim));
m.impl("amax", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_amax));
m.impl("mean.dim", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_mean_dim));
m.impl("min.dim", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_min_dim));
m.impl("amin", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_amin));
m.impl("mm", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_mm));
m.impl("mul.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_mul_Tensor));
m.impl("mul_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_mul__Tensor));
m.impl("narrow_copy",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__narrow_copy));
m.impl("pixel_shuffle",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__pixel_shuffle));
m.impl("pixel_unshuffle",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__pixel_unshuffle));
m.impl("reciprocal", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_reciprocal));
m.impl("reciprocal_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_reciprocal_));
m.impl("neg", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_neg));
m.impl("neg_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_neg_));
m.impl("round", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_round));
m.impl("round_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_round_));
m.impl("round.decimals", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_round_decimals));
m.impl("round_.decimals", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_round__decimals));
m.impl("gelu", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gelu));
m.impl("gelu_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gelu_));
m.impl("gelu_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gelu_backward));
m.impl("hardshrink", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_hardshrink));
m.impl("hardshrink_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_hardshrink_backward));
m.impl("rsqrt", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_rsqrt));
m.impl("rsqrt_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_rsqrt_));
m.impl("select_backward",
TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__select_backward));
m.impl("silu", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_silu));
m.impl("silu_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_silu_));
m.impl("silu_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_silu_backward));
m.impl("mish", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_mish));
m.impl("mish_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_mish_));
m.impl("sigmoid", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sigmoid));
m.impl("sigmoid_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sigmoid_));
m.impl("sin", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sin));
m.impl("sin_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sin_));
m.impl("sinc", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sinc));
m.impl("sinc_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sinc_));
m.impl("sinh", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sinh));
m.impl("sinh_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sinh_));
m.impl("_softmax", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__softmax));
m.impl("_softmax_backward_data", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__softmax_backward_data));
m.impl("sum.dim_IntList", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sum_dim_IntList));
m.impl("sqrt", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sqrt));
m.impl("sqrt_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sqrt_));
m.impl("prod.dim_int", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_prod_dim_int));
m.impl("tan", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_tan));
m.impl("tan_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_tan_));
m.impl("tanh", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_tanh));
m.impl("tanh_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_tanh_));
m.impl("threshold", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_threshold));
m.impl("threshold_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_threshold_));
m.impl("threshold_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_threshold_backward));
21621m.impl("_nested_view_from_buffer_copy",
21622TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___nested_view_from_buffer_copy));
21623m.impl("_trilinear",
21624TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___trilinear));
21625m.impl("trunc", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_trunc));
21626m.impl("trunc_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_trunc_));
21627m.impl("norm.ScalarOpt_dim_dtype", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_norm_ScalarOpt_dim_dtype));
21628m.impl("norm.ScalarOpt_dim", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_norm_ScalarOpt_dim));
21629m.impl("sub.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sub_Tensor));
21630m.impl("sub_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sub__Tensor));
21631m.impl("heaviside", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_heaviside));
21632m.impl("heaviside_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_heaviside_));
21633m.impl("addmm", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_addmm));
21634m.impl("addmm_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_addmm_));
21635m.impl("_addmm_activation", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__addmm_activation));
21636m.impl("lift_fresh_copy",
21637TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__lift_fresh_copy));
21638m.impl("index_add", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_index_add));
21639m.impl("index_add_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_index_add_));
21640m.impl("index_reduce", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_index_reduce));
21641m.impl("index_reduce_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_index_reduce_));
21642m.impl("scatter.src", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter_src));
21643m.impl("scatter_.src", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter__src));
21644m.impl("scatter.value", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter_value));
21645m.impl("scatter_.value", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter__value));
21646m.impl("scatter.reduce", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce));
21647m.impl("scatter_.reduce", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter__reduce));
21648m.impl("scatter.value_reduce", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter_value_reduce));
21649m.impl("scatter_.value_reduce", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter__value_reduce));
21650m.impl("scatter_add", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter_add));
21651m.impl("scatter_add_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter_add_));
21652m.impl("scatter_reduce.two", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce_two));
21653m.impl("scatter_reduce_.two", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce__two));
21654m.impl("eq.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_eq_Scalar));
21655m.impl("eq_.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_eq__Scalar));
21656m.impl("eq.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_eq_Tensor));
21657m.impl("eq_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_eq__Tensor));
21658m.impl("bitwise_and.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_and_Tensor));
21659m.impl("bitwise_and_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_and__Tensor));
21660m.impl("bitwise_or.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_or_Tensor));
21661m.impl("bitwise_or_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_or__Tensor));
21662m.impl("bitwise_xor.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_xor_Tensor));
21663m.impl("bitwise_xor_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_xor__Tensor));
21664m.impl("bitwise_left_shift.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_left_shift_Tensor));
21665m.impl("bitwise_left_shift_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_left_shift__Tensor));
21666m.impl("bitwise_right_shift.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_right_shift_Tensor));
21667m.impl("bitwise_right_shift_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_bitwise_right_shift__Tensor));
21668m.impl("tril", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_tril));
21669m.impl("tril_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_tril_));
21670m.impl("triu", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_triu));
21671m.impl("triu_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_triu_));
21672m.impl("digamma", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_digamma));
21673m.impl("digamma_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_digamma_));
21674m.impl("lerp.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lerp_Scalar));
21675m.impl("lerp_.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lerp__Scalar));
21676m.impl("lerp.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lerp_Tensor));
21677m.impl("lerp_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lerp__Tensor));
21678m.impl("ne.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ne_Scalar));
21679m.impl("ne_.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ne__Scalar));
21680m.impl("ne.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ne_Tensor));
21681m.impl("ne_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ne__Tensor));
21682m.impl("ge.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ge_Scalar));
21683m.impl("ge_.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ge__Scalar));
21684m.impl("ge.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ge_Tensor));
21685m.impl("ge_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_ge__Tensor));
21686m.impl("le.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_le_Scalar));
21687m.impl("le_.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_le__Scalar));
21688m.impl("le.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_le_Tensor));
21689m.impl("le_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_le__Tensor));
21690m.impl("gt.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gt_Scalar));
21691m.impl("gt_.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gt__Scalar));
21692m.impl("gt.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gt_Tensor));
21693m.impl("gt_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gt__Tensor));
21694m.impl("lt.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lt_Scalar));
21695m.impl("lt_.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lt__Scalar));
21696m.impl("lt.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lt_Tensor));
21697m.impl("lt_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lt__Tensor));
21698m.impl("gather", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_gather));
21699m.impl("addcmul", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_addcmul));
21700m.impl("addcmul_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_addcmul_));
21701m.impl("addcdiv", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_addcdiv));
21702m.impl("addcdiv_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_addcdiv_));
21703m.impl("triangular_solve", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_triangular_solve));
21704m.impl("lu_unpack", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lu_unpack));
21705m.impl("lgamma", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lgamma));
21706m.impl("lgamma_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_lgamma_));
21707m.impl("polygamma", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_polygamma));
21708m.impl("erfinv", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_erfinv));
21709m.impl("erfinv_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_erfinv_));
21710m.impl("i0", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_i0));
21711m.impl("i0_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_i0_));
21712m.impl("sign", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sign));
21713m.impl("sign_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sign_));
21714m.impl("signbit", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_signbit));
21715m.impl("atan2", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_atan2));
21716m.impl("atan2_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_atan2_));
21717m.impl("fmod.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_fmod_Tensor));
21718m.impl("fmod_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_fmod__Tensor));
21719m.impl("hypot", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_hypot));
21720m.impl("hypot_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_hypot_));
21721m.impl("igamma", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_igamma));
21722m.impl("igamma_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_igamma_));
21723m.impl("igammac", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_igammac));
21724m.impl("igammac_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_igammac_));
21725m.impl("nextafter", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_nextafter));
21726m.impl("nextafter_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_nextafter_));
21727m.impl("remainder.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_remainder_Tensor));
21728m.impl("remainder_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_remainder__Tensor));
21729m.impl("fmin", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_fmin));
21730m.impl("fmax", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_fmax));
21731m.impl("maximum", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_maximum));
21732m.impl("minimum", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_minimum));
21733m.impl("sort.stable", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sort_stable));
21734m.impl("topk", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_topk));
21735m.impl("all", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_all));
21736m.impl("any", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_any));
21737m.impl("renorm", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_renorm));
21738m.impl("renorm_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_renorm_));
21739m.impl("pow.Tensor_Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_pow_Tensor_Tensor));
21740m.impl("pow_.Tensor", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_pow__Tensor));
21741m.impl("pow.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_pow_Scalar));
21742m.impl("pow.Tensor_Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_pow_Tensor_Scalar));
21743m.impl("pow_.Scalar", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_pow__Scalar));
21744m.impl("_convert_indices_from_coo_to_csr", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__convert_indices_from_coo_to_csr));
21745m.impl("_convert_indices_from_csr_to_coo", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__convert_indices_from_csr_to_coo));
21746m.impl("mse_loss", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_mse_loss));
21747m.impl("nll_loss_forward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_nll_loss_forward));
21748m.impl("nll_loss_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_nll_loss_backward));
21749m.impl("smooth_l1_loss", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_smooth_l1_loss));
21750m.impl("elu", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_elu));
21751m.impl("elu_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_elu_));
21752m.impl("elu_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_elu_backward));
21753m.impl("glu", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_glu));
21754m.impl("hardsigmoid", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid));
21755m.impl("hardsigmoid_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid_));
21756m.impl("hardsigmoid_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid_backward));
21757m.impl("leaky_relu", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu));
21758m.impl("leaky_relu_", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu_));
21759m.impl("leaky_relu_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu_backward));
21760m.impl("softplus", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_softplus));
21761m.impl("softplus_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_softplus_backward));
21762m.impl("softshrink", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_softshrink));
21763m.impl("softshrink_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_softshrink_backward));
21764m.impl("adaptive_max_pool2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool2d));
21765m.impl("adaptive_max_pool2d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool2d_backward));
21766m.impl("adaptive_max_pool3d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool3d));
21767m.impl("adaptive_max_pool3d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool3d_backward));
21768m.impl("avg_pool2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_avg_pool2d));
21769m.impl("avg_pool2d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_avg_pool2d_backward));
21770m.impl("avg_pool3d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_avg_pool3d));
21771m.impl("avg_pool3d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_avg_pool3d_backward));
21772m.impl("fractional_max_pool2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool2d));
21773m.impl("fractional_max_pool2d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool2d_backward));
21774m.impl("fractional_max_pool3d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool3d));
21775m.impl("max_pool2d_with_indices", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_max_pool2d_with_indices));
21776m.impl("max_pool2d_with_indices_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_max_pool2d_with_indices_backward));
21777m.impl("reflection_pad1d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad1d));
21778m.impl("reflection_pad1d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad1d_backward));
21779m.impl("reflection_pad3d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad3d));
21780m.impl("reflection_pad3d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad3d_backward));
21781m.impl("replication_pad1d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_replication_pad1d));
21782m.impl("replication_pad1d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_replication_pad1d_backward));
21783m.impl("replication_pad2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_replication_pad2d));
21784m.impl("replication_pad3d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_replication_pad3d));
21785m.impl("upsample_linear1d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_linear1d));
21786m.impl("upsample_linear1d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_linear1d_backward));
21787m.impl("upsample_bilinear2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_bilinear2d));
21788m.impl("upsample_bilinear2d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_bilinear2d_backward));
21789m.impl("_upsample_bilinear2d_aa", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_bilinear2d_aa));
21790m.impl("_upsample_bilinear2d_aa_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_bilinear2d_aa_backward));
21791m.impl("upsample_bicubic2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_bicubic2d));
21792m.impl("upsample_bicubic2d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_bicubic2d_backward));
21793m.impl("_upsample_bicubic2d_aa", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_bicubic2d_aa));
21794m.impl("_upsample_bicubic2d_aa_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_bicubic2d_aa_backward));
21795m.impl("upsample_trilinear3d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_trilinear3d));
21796m.impl("upsample_trilinear3d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_trilinear3d_backward));
21797m.impl("upsample_nearest1d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d));
21798m.impl("_upsample_nearest_exact1d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact1d));
21799m.impl("upsample_nearest1d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d_backward));
21800m.impl("_upsample_nearest_exact1d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact1d_backward));
21801m.impl("upsample_nearest2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest2d));
21802m.impl("_upsample_nearest_exact2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact2d));
21803m.impl("upsample_nearest2d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest2d_backward));
21804m.impl("_upsample_nearest_exact2d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact2d_backward));
21805m.impl("upsample_nearest3d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest3d));
21806m.impl("_upsample_nearest_exact3d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact3d));
21807m.impl("upsample_nearest3d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest3d_backward));
21808m.impl("_upsample_nearest_exact3d_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact3d_backward));
21809m.impl("sigmoid_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_sigmoid_backward));
21810m.impl("logit_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_logit_backward));
21811m.impl("tanh_backward", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_tanh_backward));
21812m.impl("slow_conv_transpose2d", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_slow_conv_transpose2d));
21813m.impl("isposinf", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_isposinf));
21814m.impl("isneginf", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_isneginf));
21815m.impl("special_entr", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_entr));
21816m.impl("special_ndtri", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_ndtri));
21817m.impl("special_log_ndtr", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_log_ndtr));
21818m.impl("special_erfcx", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_erfcx));
21819m.impl("special_xlog1py", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_xlog1py));
21820m.impl("special_zeta", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_zeta));
21821m.impl("special_i0e", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_i0e));
21822m.impl("special_i1", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_i1));
21823m.impl("special_i1e", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_i1e));
21824m.impl("linalg_cholesky_ex", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_cholesky_ex));
21825m.impl("linalg_cross", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_cross));
21826m.impl("linalg_lu_factor_ex", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu_factor_ex));
21827m.impl("linalg_lu", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu));
21828m.impl("linalg_lu_solve", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu_solve));
21829m.impl("_linalg_det", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__linalg_det));
21830m.impl("linalg_ldl_factor_ex", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_ldl_factor_ex));
21831m.impl("linalg_ldl_solve", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_ldl_solve));
21832m.impl("_linalg_slogdet", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__linalg_slogdet));
21833m.impl("_linalg_eigh", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__linalg_eigh));
21834m.impl("linalg_inv_ex", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_inv_ex));
21835m.impl("linalg_vector_norm", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_vector_norm));
21836m.impl("_linalg_svd", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__linalg_svd));
21837m.impl("linalg_pinv.atol_rtol_tensor",
21838TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_atol_rtol_tensor_linalg_pinv));
21839m.impl("_linalg_solve_ex", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__linalg_solve_ex));
21840m.impl("linalg_qr", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_linalg_qr));
21841m.impl("_test_autograd_multiple_dispatch_view_copy",
21842TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___test_autograd_multiple_dispatch_view_copy));
21843m.impl("_fw_primal_copy",
21844TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___fw_primal_copy));
21845m.impl("_make_dual_copy",
21846TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___make_dual_copy));
21847m.impl("view_as_real_copy",
21848TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__view_as_real_copy));
21849m.impl("view_as_complex_copy",
21850TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__view_as_complex_copy));
21851m.impl("_conj_copy",
21852TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___conj_copy));
21853m.impl("_neg_view_copy",
21854TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___neg_view_copy));
21855m.impl("as_strided_copy",
21856TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__as_strided_copy));
21857m.impl("_sparse_broadcast_to_copy",
21858TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___sparse_broadcast_to_copy));
21859m.impl("diagonal_copy",
21860TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__diagonal_copy));
21861m.impl("expand_copy",
21862TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__expand_copy));
21863m.impl("permute_copy",
21864TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__permute_copy));
21865m.impl("_reshape_alias_copy",
21866TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___reshape_alias_copy));
21867m.impl("select_copy.int",
21868TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_int_select_copy));
21869m.impl("detach_copy",
21870TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__detach_copy));
21871m.impl("slice_copy.Tensor",
21872TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_Tensor_slice_copy));
21873m.impl("split_copy.Tensor",
21874TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_Tensor_split_copy));
21875m.impl("split_with_sizes_copy",
21876TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__split_with_sizes_copy));
21877m.impl("squeeze_copy",
21878TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__squeeze_copy));
21879m.impl("squeeze_copy.dim",
21880TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_dim_squeeze_copy));
21881m.impl("squeeze_copy.dims",
21882TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_dims_squeeze_copy));
21883m.impl("t_copy",
21884TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__t_copy));
21885m.impl("transpose_copy.int",
21886TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_int_transpose_copy));
21887m.impl("unsqueeze_copy",
21888TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__unsqueeze_copy));
21889m.impl("_indices_copy",
21890TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___indices_copy));
21891m.impl("_values_copy",
21892TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional___values_copy));
21893m.impl("indices_copy",
21894TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__indices_copy));
21895m.impl("values_copy",
21896TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__values_copy));
21897m.impl("crow_indices_copy",
21898TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__crow_indices_copy));
21899m.impl("col_indices_copy",
21900TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__col_indices_copy));
21901m.impl("ccol_indices_copy",
21902TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__ccol_indices_copy));
21903m.impl("row_indices_copy",
21904TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__row_indices_copy));
21905m.impl("unbind_copy.int",
21906TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_int_unbind_copy));
21907m.impl("view_copy",
21908TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__view_copy));
21909m.impl("view_copy.dtype",
21910TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_dtype_view_copy));
21911m.impl("unfold_copy",
21912TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__unfold_copy));
21913m.impl("alias_copy",
21914TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional__alias_copy));
21915m.impl("special_airy_ai", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_airy_ai));
21916m.impl("special_bessel_j0", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_j0));
21917m.impl("special_bessel_j1", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_j1));
21918m.impl("special_bessel_y0", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_y0));
21919m.impl("special_bessel_y1", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_y1));
21920m.impl("special_chebyshev_polynomial_t", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_t));
21921m.impl("special_chebyshev_polynomial_u", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_u));
21922m.impl("special_chebyshev_polynomial_v", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_v));
21923m.impl("special_chebyshev_polynomial_w", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_w));
21924m.impl("special_hermite_polynomial_h", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_hermite_polynomial_h));
21925m.impl("special_hermite_polynomial_he", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_hermite_polynomial_he));
21926m.impl("special_laguerre_polynomial_l", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_laguerre_polynomial_l));
21927m.impl("special_legendre_polynomial_p", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_legendre_polynomial_p));
21928m.impl("special_modified_bessel_i0", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_i0));
21929m.impl("special_modified_bessel_i1", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_i1));
21930m.impl("special_modified_bessel_k0", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_k0));
21931m.impl("special_modified_bessel_k1", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_k1));
21932m.impl("special_scaled_modified_bessel_k0", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_scaled_modified_bessel_k0));
21933m.impl("special_scaled_modified_bessel_k1", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_scaled_modified_bessel_k1));
21934m.impl("special_shifted_chebyshev_polynomial_t", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_t));
21935m.impl("special_shifted_chebyshev_polynomial_u", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_u));
21936m.impl("special_shifted_chebyshev_polynomial_v", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_v));
21937m.impl("special_shifted_chebyshev_polynomial_w", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_w));
21938m.impl("special_spherical_bessel_j0", TORCH_FN(wrapper_CompositeExplicitAutogradNonFunctional_special_spherical_bessel_j0));
21939};
} // anonymous namespace
namespace compositeexplicitautogradnonfunctional {
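// Direct-call entry points for this dispatch key, matching the declarations
// in the per-operator *_compositeexplicitautogradnonfunctional_dispatch.h
// headers. Each function forwards straight to the wrapper registered above,
// bypassing dispatcher lookup. Illustrative (hypothetical) call site:
//
//   at::Tensor y = at::compositeexplicitautogradnonfunctional::sgn(x);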
at::Tensor sgn(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sgn(self);
}
at::Tensor & sgn_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sgn_(self);
}
at::Tensor acos(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_acos(self);
}
at::Tensor & acos_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_acos_(self);
}
at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_add_Tensor(self, other, alpha);
}
at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_add__Tensor(self, other, alpha);
}
at::Tensor addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_addmv(self, mat, vec, beta, alpha);
}
at::Tensor & addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_addmv_(self, mat, vec, beta, alpha);
}
at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_all_dim(self, dim, keepdim);
}
at::Tensor any(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_any_dim(self, dim, keepdim);
}
at::Tensor argmax(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_argmax(self, dim, keepdim);
}
at::Tensor argmin(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_argmin(self, dim, keepdim);
}
at::Tensor acosh(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_acosh(self);
}
at::Tensor & acosh_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_acosh_(self);
}
at::Tensor asinh(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_asinh(self);
}
at::Tensor & asinh_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_asinh_(self);
}
at::Tensor atanh(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_atanh(self);
}
at::Tensor & atanh_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_atanh_(self);
}
const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) {
return wrapper_CompositeExplicitAutogradNonFunctional__as_strided_(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
}
const at::Tensor & as_strided__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
return wrapper_CompositeExplicitAutogradNonFunctional__as_strided_(self, size, stride, storage_offset);
}
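// Operators with SymInt schemas get a pair of entry points: the plain
// IntArrayRef overload above widens its arguments with
// c10::fromIntArrayRefSlow() before reaching the same wrapper that the
// *_symint overload calls directly.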
at::Tensor asin(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_asin(self);
}
at::Tensor & asin_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_asin_(self);
}
at::Tensor atan(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_atan(self);
}
at::Tensor & atan_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_atan_(self);
}
at::Tensor baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_baddbmm(self, batch1, batch2, beta, alpha);
}
at::Tensor & baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_baddbmm_(self, batch1, batch2, beta, alpha);
}
at::Tensor bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
return wrapper_CompositeExplicitAutogradNonFunctional_p_bernoulli(self, p, generator);
}
at::Tensor bitwise_not(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_not(self);
}
at::Tensor & bitwise_not_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_not_(self);
}
at::Tensor copysign(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_copysign_Tensor(self, other);
}
at::Tensor & copysign_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_copysign__Tensor(self, other);
}
at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CompositeExplicitAutogradNonFunctional_bmm(self, mat2);
}
at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim) {
return wrapper_CompositeExplicitAutogradNonFunctional_cat(tensors, dim);
}
at::Tensor ceil(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_ceil(self);
}
at::Tensor & ceil_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_ceil_(self);
}
at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp(self, min, max);
}
at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_(self, min, max);
}
at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_Tensor(self, min, max);
}
at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp__Tensor(self, min, max);
}
at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_max(self, max);
}
at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_max_(self, max);
}
at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_max_Tensor(self, max);
}
at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_max__Tensor(self, max);
}
at::Tensor clamp_min(const at::Tensor & self, const at::Scalar & min) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_min(self, min);
}
at::Tensor & clamp_min_(at::Tensor & self, const at::Scalar & min) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_min_(self, min);
}
at::Tensor clamp_min(const at::Tensor & self, const at::Tensor & min) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_min_Tensor(self, min);
}
at::Tensor & clamp_min_(at::Tensor & self, const at::Tensor & min) {
return wrapper_CompositeExplicitAutogradNonFunctional_clamp_min__Tensor(self, min);
}
at::Tensor copy(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
return wrapper_CompositeExplicitAutogradNonFunctional__copy(self, src, non_blocking);
}
at::Tensor cos(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_cos(self);
}
at::Tensor & cos_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_cos_(self);
}
at::Tensor cosh(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_cosh(self);
}
at::Tensor & cosh_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_cosh_(self);
}
at::Tensor cumprod(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional_cumprod(self, dim, dtype);
}
at::Tensor & cumprod_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional_cumprod_(self, dim, dtype);
}
at::Tensor cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional_cumsum(self, dim, dtype);
}
at::Tensor & cumsum_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional_cumsum_(self, dim, dtype);
}
at::Tensor diag_embed(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
return wrapper_CompositeExplicitAutogradNonFunctional__diag_embed(self, offset, dim1, dim2);
}
at::Tensor div(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_div_Tensor(self, other);
}
at::Tensor & div_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_div__Tensor(self, other);
}
at::Tensor div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
return wrapper_CompositeExplicitAutogradNonFunctional_div_Tensor_mode(self, other, rounding_mode);
}
at::Tensor & div_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
return wrapper_CompositeExplicitAutogradNonFunctional_div__Tensor_mode(self, other, rounding_mode);
}
at::Tensor new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options) {
return wrapper_CompositeExplicitAutogradNonFunctional__new_empty_strided(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutogradNonFunctional__new_empty_strided(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory);
}
at::Tensor new_empty_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) {
return wrapper_CompositeExplicitAutogradNonFunctional__new_empty_strided(self, size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor new_empty_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeExplicitAutogradNonFunctional__new_empty_strided(self, size, stride, dtype, layout, device, pin_memory);
}
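// new_empty_strided is exposed both with a packed at::TensorOptions argument
// and with the unpacked dtype/layout/device/pin_memory optionals, in
// IntArrayRef and SymInt flavors; all four overloads funnel into the same
// wrapper above.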
at::Tensor erf(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_erf(self);
}
at::Tensor & erf_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_erf_(self);
}
at::Tensor erfc(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_erfc(self);
}
at::Tensor & erfc_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_erfc_(self);
}
at::Tensor exp(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_exp(self);
}
at::Tensor & exp_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_exp_(self);
}
at::Tensor exp2(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_exp2(self);
}
at::Tensor & exp2_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_exp2_(self);
}
at::Tensor expm1(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_expm1(self);
}
at::Tensor & expm1_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_expm1_(self);
}
at::Tensor floor(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_floor(self);
}
at::Tensor & floor_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_floor_(self);
}
at::Tensor frac(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_frac(self);
}
at::Tensor & frac_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_frac_(self);
}
at::Tensor gcd(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_gcd(self, other);
}
at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_gcd_(self, other);
}
at::Tensor lcm(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_lcm(self, other);
}
at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_lcm_(self, other);
}
at::Tensor index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
return wrapper_CompositeExplicitAutogradNonFunctional_index_Tensor(self, indices);
}
at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
return wrapper_CompositeExplicitAutogradNonFunctional_index_copy(self, dim, index, source);
}
at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
return wrapper_CompositeExplicitAutogradNonFunctional_index_copy_(self, dim, index, source);
}
at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
return wrapper_CompositeExplicitAutogradNonFunctional_isin_Tensor_Tensor(elements, test_elements, assume_unique, invert);
}
at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
return wrapper_CompositeExplicitAutogradNonFunctional_isin_Tensor_Scalar(elements, test_element, assume_unique, invert);
}
at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
return wrapper_CompositeExplicitAutogradNonFunctional_isin_Scalar_Tensor(element, test_elements, assume_unique, invert);
}
at::Tensor log(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_log(self);
}
at::Tensor & log_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_log_(self);
}
at::Tensor log10(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_log10(self);
}
at::Tensor & log10_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_log10_(self);
}
at::Tensor log1p(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_log1p(self);
}
at::Tensor & log1p_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_log1p_(self);
}
at::Tensor log2(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_log2(self);
}
at::Tensor & log2_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_log2_(self);
}
at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_logaddexp(self, other);
}
at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_logaddexp2(self, other);
}
at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_xlogy_Tensor(self, other);
}
at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_xlogy__Tensor(self, other);
}
at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
return wrapper_CompositeExplicitAutogradNonFunctional__log_softmax(self, dim, half_to_float);
}
at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional__log_softmax_backward_data(grad_output, output, dim, input_dtype);
22250}
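// For ops with an out variant the codegen emits two shims: op_out(out, ...)
// with the out argument first (method-style API) and op_outf(..., out) with the
// out argument last (native signature order). Both forward to the same wrapper,
// as in the logsumexp pair below.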
at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_out_logsumexp_out(self, dim, keepdim, out);
}
at::Tensor & logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
return wrapper_CompositeExplicitAutogradNonFunctional_out_logsumexp_out(self, dim, keepdim, out);
}
::std::tuple<at::Tensor,at::Tensor> aminmax(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_aminmax(self, dim, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_max_dim(self, dim, keepdim);
}
at::Tensor amax(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_amax(self, dim, keepdim);
}
at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional_mean_dim(self, dim, keepdim, dtype);
}
::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_min_dim(self, dim, keepdim);
}
at::Tensor amin(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_amin(self, dim, keepdim);
}
at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CompositeExplicitAutogradNonFunctional_mm(self, mat2);
}
at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_mul_Tensor(self, other);
}
at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_mul__Tensor(self, other);
}
at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
return wrapper_CompositeExplicitAutogradNonFunctional__narrow_copy(self, dim, start, length);
}
at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
return wrapper_CompositeExplicitAutogradNonFunctional__narrow_copy(self, dim, start, length);
}
at::Tensor pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) {
return wrapper_CompositeExplicitAutogradNonFunctional__pixel_shuffle(self, upscale_factor);
}
at::Tensor pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor) {
return wrapper_CompositeExplicitAutogradNonFunctional__pixel_unshuffle(self, downscale_factor);
}
at::Tensor reciprocal(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_reciprocal(self);
}
at::Tensor & reciprocal_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_reciprocal_(self);
}
at::Tensor neg(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_neg(self);
}
at::Tensor & neg_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_neg_(self);
}
at::Tensor round(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_round(self);
}
at::Tensor & round_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_round_(self);
}
at::Tensor round(const at::Tensor & self, int64_t decimals) {
return wrapper_CompositeExplicitAutogradNonFunctional_round_decimals(self, decimals);
}
at::Tensor & round_(at::Tensor & self, int64_t decimals) {
return wrapper_CompositeExplicitAutogradNonFunctional_round__decimals(self, decimals);
}
at::Tensor gelu(const at::Tensor & self, c10::string_view approximate) {
return wrapper_CompositeExplicitAutogradNonFunctional_gelu(self, approximate);
}
at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate) {
return wrapper_CompositeExplicitAutogradNonFunctional_gelu_(self, approximate);
}
at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
return wrapper_CompositeExplicitAutogradNonFunctional_gelu_backward(grad_output, self, approximate);
}
at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CompositeExplicitAutogradNonFunctional_hardshrink(self, lambd);
}
at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CompositeExplicitAutogradNonFunctional_hardshrink_backward(grad_out, self, lambd);
}
at::Tensor rsqrt(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_rsqrt(self);
}
at::Tensor & rsqrt_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_rsqrt_(self);
}
at::Tensor select_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
return wrapper_CompositeExplicitAutogradNonFunctional__select_backward(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index);
}
at::Tensor select_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
return wrapper_CompositeExplicitAutogradNonFunctional__select_backward(grad_output, input_sizes, dim, index);
}
at::Tensor silu(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_silu(self);
}
at::Tensor & silu_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_silu_(self);
}
at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_silu_backward(grad_output, self);
}
at::Tensor mish(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_mish(self);
}
at::Tensor & mish_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_mish_(self);
}
at::Tensor sigmoid(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sigmoid(self);
}
at::Tensor & sigmoid_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sigmoid_(self);
}
at::Tensor sin(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sin(self);
}
at::Tensor & sin_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sin_(self);
}
at::Tensor sinc(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sinc(self);
}
at::Tensor & sinc_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sinc_(self);
}
at::Tensor sinh(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sinh(self);
}
at::Tensor & sinh_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sinh_(self);
}
at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
return wrapper_CompositeExplicitAutogradNonFunctional__softmax(self, dim, half_to_float);
}
at::Tensor _softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional__softmax_backward_data(grad_output, output, dim, input_dtype);
}
at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional_sum_dim_IntList(self, dim, keepdim, dtype);
}
at::Tensor sqrt(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sqrt(self);
}
at::Tensor & sqrt_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sqrt_(self);
}
at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional_prod_dim_int(self, dim, keepdim, dtype);
}
at::Tensor tan(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_tan(self);
}
at::Tensor & tan_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_tan_(self);
}
at::Tensor tanh(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_tanh(self);
}
at::Tensor & tanh_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_tanh_(self);
}
at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
return wrapper_CompositeExplicitAutogradNonFunctional_threshold(self, threshold, value);
}
at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
return wrapper_CompositeExplicitAutogradNonFunctional_threshold_(self, threshold, value);
}
at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_CompositeExplicitAutogradNonFunctional_threshold_backward(grad_output, self, threshold);
}
at::Tensor _nested_view_from_buffer_copy(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
return wrapper_CompositeExplicitAutogradNonFunctional___nested_view_from_buffer_copy(self, nested_size, nested_strides, offsets);
}
at::Tensor _trilinear(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
return wrapper_CompositeExplicitAutogradNonFunctional___trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
}
at::Tensor trunc(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_trunc(self);
}
at::Tensor & trunc_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_trunc_(self);
}
at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
return wrapper_CompositeExplicitAutogradNonFunctional_norm_ScalarOpt_dim_dtype(self, p, dim, keepdim, dtype);
}
at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeExplicitAutogradNonFunctional_norm_ScalarOpt_dim(self, p, dim, keepdim);
}
at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_sub_Tensor(self, other, alpha);
}
at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_sub__Tensor(self, other, alpha);
}
at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values) {
return wrapper_CompositeExplicitAutogradNonFunctional_heaviside(self, values);
}
at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values) {
return wrapper_CompositeExplicitAutogradNonFunctional_heaviside_(self, values);
}
at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_addmm_(self, mat1, mat2, beta, alpha);
}
at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
return wrapper_CompositeExplicitAutogradNonFunctional__addmm_activation(self, mat1, mat2, beta, alpha, use_gelu);
}
at::Tensor lift_fresh_copy(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional__lift_fresh_copy(self);
}
at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_index_add(self, dim, index, source, alpha);
}
at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
return wrapper_CompositeExplicitAutogradNonFunctional_index_add_(self, dim, index, source, alpha);
}
at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
return wrapper_CompositeExplicitAutogradNonFunctional_index_reduce(self, dim, index, source, reduce, include_self);
}
at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
return wrapper_CompositeExplicitAutogradNonFunctional_index_reduce_(self, dim, index, source, reduce, include_self);
}
at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter_src(self, dim, index, src);
}
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter__src(self, dim, index, src);
}
at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter_value(self, dim, index, value);
}
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter__value(self, dim, index, value);
}
at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce(self, dim, index, src, reduce);
}
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter__reduce(self, dim, index, src, reduce);
}
at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter_value_reduce(self, dim, index, value, reduce);
}
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter__value_reduce(self, dim, index, value, reduce);
}
at::Tensor scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter_add(self, dim, index, src);
}
at::Tensor & scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter_add_(self, dim, index, src);
}
at::Tensor scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce_two(self, dim, index, src, reduce, include_self);
}
at::Tensor & scatter_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
return wrapper_CompositeExplicitAutogradNonFunctional_scatter_reduce__two(self, dim, index, src, reduce, include_self);
}
at::Tensor eq(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_eq_Scalar(self, other);
}
at::Tensor & eq_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_eq__Scalar(self, other);
}
at::Tensor eq(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_eq_Tensor(self, other);
}
at::Tensor & eq_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_eq__Tensor(self, other);
}
at::Tensor bitwise_and(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_and_Tensor(self, other);
}
at::Tensor & bitwise_and_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_and__Tensor(self, other);
}
at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_or_Tensor(self, other);
}
at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_or__Tensor(self, other);
}
at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_xor_Tensor(self, other);
}
at::Tensor & bitwise_xor_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_xor__Tensor(self, other);
}
at::Tensor bitwise_left_shift(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_left_shift_Tensor(self, other);
}
at::Tensor & bitwise_left_shift_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_left_shift__Tensor(self, other);
}
at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_right_shift_Tensor(self, other);
}
at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_bitwise_right_shift__Tensor(self, other);
}
at::Tensor tril(const at::Tensor & self, int64_t diagonal) {
return wrapper_CompositeExplicitAutogradNonFunctional_tril(self, diagonal);
}
at::Tensor & tril_(at::Tensor & self, int64_t diagonal) {
return wrapper_CompositeExplicitAutogradNonFunctional_tril_(self, diagonal);
}
at::Tensor triu(const at::Tensor & self, int64_t diagonal) {
return wrapper_CompositeExplicitAutogradNonFunctional_triu(self, diagonal);
}
at::Tensor & triu_(at::Tensor & self, int64_t diagonal) {
return wrapper_CompositeExplicitAutogradNonFunctional_triu_(self, diagonal);
}
at::Tensor digamma(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_digamma(self);
}
at::Tensor & digamma_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_digamma_(self);
}
at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
return wrapper_CompositeExplicitAutogradNonFunctional_lerp_Scalar(self, end, weight);
}
at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
return wrapper_CompositeExplicitAutogradNonFunctional_lerp__Scalar(self, end, weight);
}
at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
return wrapper_CompositeExplicitAutogradNonFunctional_lerp_Tensor(self, end, weight);
}
at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
return wrapper_CompositeExplicitAutogradNonFunctional_lerp__Tensor(self, end, weight);
}
at::Tensor ne(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_ne_Scalar(self, other);
}
at::Tensor & ne_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_ne__Scalar(self, other);
}
at::Tensor ne(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_ne_Tensor(self, other);
}
at::Tensor & ne_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_ne__Tensor(self, other);
}
at::Tensor ge(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_ge_Scalar(self, other);
}
at::Tensor & ge_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_ge__Scalar(self, other);
}
at::Tensor ge(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_ge_Tensor(self, other);
}
at::Tensor & ge_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_ge__Tensor(self, other);
}
at::Tensor le(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_le_Scalar(self, other);
}
at::Tensor & le_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_le__Scalar(self, other);
}
at::Tensor le(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_le_Tensor(self, other);
}
at::Tensor & le_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_le__Tensor(self, other);
}
at::Tensor gt(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_gt_Scalar(self, other);
}
at::Tensor & gt_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_gt__Scalar(self, other);
}
at::Tensor gt(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_gt_Tensor(self, other);
}
at::Tensor & gt_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_gt__Tensor(self, other);
}
at::Tensor lt(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_lt_Scalar(self, other);
}
at::Tensor & lt_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_lt__Scalar(self, other);
}
at::Tensor lt(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_lt_Tensor(self, other);
}
at::Tensor & lt_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_lt__Tensor(self, other);
}
at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
return wrapper_CompositeExplicitAutogradNonFunctional_gather(self, dim, index, sparse_grad);
}
at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CompositeExplicitAutogradNonFunctional_addcmul(self, tensor1, tensor2, value);
}
at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CompositeExplicitAutogradNonFunctional_addcmul_(self, tensor1, tensor2, value);
}
at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CompositeExplicitAutogradNonFunctional_addcdiv(self, tensor1, tensor2, value);
}
at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CompositeExplicitAutogradNonFunctional_addcdiv_(self, tensor1, tensor2, value);
}
::std::tuple<at::Tensor,at::Tensor> triangular_solve(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
return wrapper_CompositeExplicitAutogradNonFunctional_triangular_solve(self, A, upper, transpose, unitriangular);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
return wrapper_CompositeExplicitAutogradNonFunctional_lu_unpack(LU_data, LU_pivots, unpack_data, unpack_pivots);
}
at::Tensor lgamma(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_lgamma(self);
}
at::Tensor & lgamma_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_lgamma_(self);
}
at::Tensor polygamma(int64_t n, const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_polygamma(n, self);
}
at::Tensor erfinv(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_erfinv(self);
}
at::Tensor & erfinv_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_erfinv_(self);
}
at::Tensor i0(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_i0(self);
}
at::Tensor & i0_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_i0_(self);
}
at::Tensor sign(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sign(self);
}
at::Tensor & sign_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_sign_(self);
}
at::Tensor signbit(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_signbit(self);
}
at::Tensor atan2(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_atan2(self, other);
}
at::Tensor & atan2_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_atan2_(self, other);
}
at::Tensor fmod(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_fmod_Tensor(self, other);
}
at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_fmod__Tensor(self, other);
}
at::Tensor hypot(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_hypot(self, other);
}
at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_hypot_(self, other);
}
at::Tensor igamma(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_igamma(self, other);
}
at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_igamma_(self, other);
}
at::Tensor igammac(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_igammac(self, other);
}
at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_igammac_(self, other);
}
at::Tensor nextafter(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_nextafter(self, other);
}
at::Tensor & nextafter_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_nextafter_(self, other);
}
at::Tensor remainder(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_remainder_Tensor(self, other);
}
at::Tensor & remainder_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_remainder__Tensor(self, other);
}
at::Tensor fmin(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_fmin(self, other);
}
at::Tensor fmax(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_fmax(self, other);
}
at::Tensor maximum(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_maximum(self, other);
}
at::Tensor minimum(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeExplicitAutogradNonFunctional_minimum(self, other);
}
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
return wrapper_CompositeExplicitAutogradNonFunctional_sort_stable(self, stable, dim, descending);
}
::std::tuple<at::Tensor,at::Tensor> topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
return wrapper_CompositeExplicitAutogradNonFunctional_topk(self, k, dim, largest, sorted);
}
at::Tensor all(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_all(self);
}
at::Tensor any(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_any(self);
}
at::Tensor renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
return wrapper_CompositeExplicitAutogradNonFunctional_renorm(self, p, dim, maxnorm);
}
at::Tensor & renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
return wrapper_CompositeExplicitAutogradNonFunctional_renorm_(self, p, dim, maxnorm);
}
at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CompositeExplicitAutogradNonFunctional_pow_Tensor_Tensor(self, exponent);
}
at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CompositeExplicitAutogradNonFunctional_pow__Tensor(self, exponent);
}
at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent) {
return wrapper_CompositeExplicitAutogradNonFunctional_pow_Scalar(self, exponent);
}
at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CompositeExplicitAutogradNonFunctional_pow_Tensor_Scalar(self, exponent);
}
at::Tensor & pow_(at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CompositeExplicitAutogradNonFunctional_pow__Scalar(self, exponent);
}
at::Tensor _convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32) {
return wrapper_CompositeExplicitAutogradNonFunctional__convert_indices_from_coo_to_csr(self, size, out_int32);
}
at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
return wrapper_CompositeExplicitAutogradNonFunctional__convert_indices_from_csr_to_coo(crow_indices, col_indices, out_int32, transpose);
}
at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CompositeExplicitAutogradNonFunctional_mse_loss(self, target, reduction);
}
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CompositeExplicitAutogradNonFunctional_nll_loss_forward(self, target, weight, reduction, ignore_index);
}
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CompositeExplicitAutogradNonFunctional_nll_loss_forward(self, target, weight, reduction, ignore_index.expect_int());
}
at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
return wrapper_CompositeExplicitAutogradNonFunctional_nll_loss_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
at::Tensor nll_loss_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
return wrapper_CompositeExplicitAutogradNonFunctional_nll_loss_backward(grad_output, self, target, weight, reduction, ignore_index.expect_int(), total_weight);
}
at::Tensor smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
return wrapper_CompositeExplicitAutogradNonFunctional_smooth_l1_loss(self, target, reduction, beta);
}
at::Tensor elu(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
return wrapper_CompositeExplicitAutogradNonFunctional_elu(self, alpha, scale, input_scale);
}
at::Tensor & elu_(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
return wrapper_CompositeExplicitAutogradNonFunctional_elu_(self, alpha, scale, input_scale);
}
at::Tensor elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
return wrapper_CompositeExplicitAutogradNonFunctional_elu_backward(grad_output, alpha, scale, input_scale, is_result, self_or_result);
}
at::Tensor glu(const at::Tensor & self, int64_t dim) {
return wrapper_CompositeExplicitAutogradNonFunctional_glu(self, dim);
}
at::Tensor hardsigmoid(const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid(self);
}
at::Tensor & hardsigmoid_(at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid_(self);
}
at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CompositeExplicitAutogradNonFunctional_hardsigmoid_backward(grad_output, self);
}
at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu(self, negative_slope);
}
at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu_(self, negative_slope);
}
at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
return wrapper_CompositeExplicitAutogradNonFunctional_leaky_relu_backward(grad_output, self, negative_slope, self_is_result);
}
at::Tensor softplus(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CompositeExplicitAutogradNonFunctional_softplus(self, beta, threshold);
}
at::Tensor softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CompositeExplicitAutogradNonFunctional_softplus_backward(grad_output, self, beta, threshold);
}
at::Tensor softshrink(const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CompositeExplicitAutogradNonFunctional_softshrink(self, lambd);
}
at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CompositeExplicitAutogradNonFunctional_softshrink_backward(grad_output, self, lambd);
}
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool2d(self, output_size);
}
at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool2d_backward(grad_output, self, indices);
}
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool3d(self, output_size);
}
at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CompositeExplicitAutogradNonFunctional_adaptive_max_pool3d_backward(grad_output, self, indices);
}
at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CompositeExplicitAutogradNonFunctional_avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CompositeExplicitAutogradNonFunctional_avg_pool2d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CompositeExplicitAutogradNonFunctional_avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CompositeExplicitAutogradNonFunctional_avg_pool3d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
return wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool2d(self, kernel_size, output_size, random_samples);
}
at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool2d_backward(grad_output, self, kernel_size, output_size, indices);
}
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
return wrapper_CompositeExplicitAutogradNonFunctional_fractional_max_pool3d(self, kernel_size, output_size, random_samples);
}
::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CompositeExplicitAutogradNonFunctional_max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
return wrapper_CompositeExplicitAutogradNonFunctional_max_pool2d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
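// The *_symint overloads below take c10::SymIntArrayRef. Because the wrappers
// for these padding/upsampling ops take concrete at::IntArrayRef, the symbolic
// sizes are converted back with C10_AS_INTARRAYREF_SLOW, which expects the
// values to be concrete integers at this point.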
22890at::Tensor reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
22891return wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad1d(self, padding);
22892}
22893at::Tensor reflection_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
22894return wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad1d(self, C10_AS_INTARRAYREF_SLOW(padding));
22895}
22896at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
22897return wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad1d_backward(grad_output, self, padding);
22898}
22899at::Tensor reflection_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
22900return wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad1d_backward(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
22901}
22902at::Tensor reflection_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
22903return wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad3d(self, padding);
22904}
22905at::Tensor reflection_pad3d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
22906return wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad3d(self, C10_AS_INTARRAYREF_SLOW(padding));
22907}
22908at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
22909return wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad3d_backward(grad_output, self, padding);
22910}
22911at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
22912return wrapper_CompositeExplicitAutogradNonFunctional_reflection_pad3d_backward(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
22913}
22914at::Tensor replication_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
22915return wrapper_CompositeExplicitAutogradNonFunctional_replication_pad1d(self, padding);
22916}
22917at::Tensor replication_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
22918return wrapper_CompositeExplicitAutogradNonFunctional_replication_pad1d(self, C10_AS_INTARRAYREF_SLOW(padding));
22919}
22920at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
22921return wrapper_CompositeExplicitAutogradNonFunctional_replication_pad1d_backward(grad_output, self, padding);
22922}
22923at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
22924return wrapper_CompositeExplicitAutogradNonFunctional_replication_pad1d_backward(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
22925}
22926at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
22927return wrapper_CompositeExplicitAutogradNonFunctional_replication_pad2d(self, padding);
22928}
22929at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
22930return wrapper_CompositeExplicitAutogradNonFunctional_replication_pad2d(self, C10_AS_INTARRAYREF_SLOW(padding));
22931}
22932at::Tensor replication_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
22933return wrapper_CompositeExplicitAutogradNonFunctional_replication_pad3d(self, padding);
22934}
22935at::Tensor replication_pad3d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
22936return wrapper_CompositeExplicitAutogradNonFunctional_replication_pad3d(self, C10_AS_INTARRAYREF_SLOW(padding));
22937}
22938at::Tensor upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
22939return wrapper_CompositeExplicitAutogradNonFunctional_upsample_linear1d(self, output_size, align_corners, scales);
22940}
22941at::Tensor upsample_linear1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
22942return wrapper_CompositeExplicitAutogradNonFunctional_upsample_linear1d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales);
22943}
22944at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
22945return wrapper_CompositeExplicitAutogradNonFunctional_upsample_linear1d_backward(grad_output, output_size, input_size, align_corners, scales);
22946}
22947at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
22948return wrapper_CompositeExplicitAutogradNonFunctional_upsample_linear1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales);
22949}
22950at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
22951return wrapper_CompositeExplicitAutogradNonFunctional_upsample_bilinear2d(self, output_size, align_corners, scales_h, scales_w);
22952}
22953at::Tensor upsample_bilinear2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
22954return wrapper_CompositeExplicitAutogradNonFunctional_upsample_bilinear2d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
22955}
22956at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
22957return wrapper_CompositeExplicitAutogradNonFunctional_upsample_bilinear2d_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
22958}
22959at::Tensor upsample_bilinear2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
22960return wrapper_CompositeExplicitAutogradNonFunctional_upsample_bilinear2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
22961}
22962at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
22963return wrapper_CompositeExplicitAutogradNonFunctional__upsample_bilinear2d_aa(self, output_size, align_corners, scales_h, scales_w);
22964}
22965at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
22966return wrapper_CompositeExplicitAutogradNonFunctional__upsample_bilinear2d_aa(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
22967}
22968at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
22969return wrapper_CompositeExplicitAutogradNonFunctional__upsample_bilinear2d_aa_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
22970}
22971at::Tensor _upsample_bilinear2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
22972return wrapper_CompositeExplicitAutogradNonFunctional__upsample_bilinear2d_aa_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
22973}
at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_bicubic2d(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bicubic2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_bicubic2d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_bicubic2d_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_bicubic2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_bicubic2d_aa(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_bicubic2d_aa(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_bicubic2d_aa_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_bicubic2d_aa_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_trilinear3d(self, output_size, align_corners, scales_d, scales_h, scales_w);
}
at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_trilinear3d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_d, scales_h, scales_w);
}
at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_trilinear3d_backward(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
}
at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_trilinear3d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d(self, output_size, scales);
}
at::Tensor upsample_nearest1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales);
}
at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact1d(self, output_size, scales);
}
at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact1d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales);
}
at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d_backward(grad_output, output_size, input_size, scales);
}
at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales);
}
at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact1d_backward(grad_output, output_size, input_size, scales);
}
at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales);
}
at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest2d(self, output_size, scales_h, scales_w);
}
at::Tensor upsample_nearest2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest2d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact2d(self, output_size, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact2d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w);
}
at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest2d_backward(grad_output, output_size, input_size, scales_h, scales_w);
}
at::Tensor upsample_nearest2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact2d_backward(grad_output, output_size, input_size, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w);
}
at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest3d(self, output_size, scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest3d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact3d(self, output_size, scales_d, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact3d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest3d_backward(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional_upsample_nearest3d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact3d_backward(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
    return wrapper_CompositeExplicitAutogradNonFunctional__upsample_nearest_exact3d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w);
}
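// Pointwise activation backwards follow. For reference (hedged sketch only, not
// part of the generated registrations), tanh_backward computes
// grad_output * (1 - output^2); given an input x and incoming gradient gy:
//
//   at::Tensor y  = at::tanh(x);
//   at::Tensor gx = at::tanh_backward(gy, y);   // == gy * (1 - y * y)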
at::Tensor sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) {
    return wrapper_CompositeExplicitAutogradNonFunctional_sigmoid_backward(grad_output, output);
}
at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
    return wrapper_CompositeExplicitAutogradNonFunctional_logit_backward(grad_output, self, eps);
}
at::Tensor tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
    return wrapper_CompositeExplicitAutogradNonFunctional_tanh_backward(grad_output, output);
}
at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
    return wrapper_CompositeExplicitAutogradNonFunctional_slow_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
    return wrapper_CompositeExplicitAutogradNonFunctional_slow_conv_transpose2d(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(output_padding), dilation);
}
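// Elementwise predicates (isposinf / isneginf) and scalar special functions
// (entr, ndtri, log_ndtr, erfcx, xlog1py, zeta, i0e, i1, i1e) follow; each is a
// unary or broadcasting binary op, e.g. at::isposinf(t) yields a bool tensor
// that is true exactly where t is +inf.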
at::Tensor isposinf(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_isposinf(self);
}
at::Tensor isneginf(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_isneginf(self);
}
at::Tensor special_entr(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_entr(self);
}
at::Tensor special_ndtri(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_ndtri(self);
}
at::Tensor special_log_ndtr(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_log_ndtr(self);
}
at::Tensor special_erfcx(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_erfcx(self);
}
at::Tensor special_xlog1py(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_xlog1py(self, other);
}
at::Tensor special_zeta(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_zeta(self, other);
}
at::Tensor special_i0e(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_i0e(self);
}
at::Tensor special_i1(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_i1(self);
}
at::Tensor special_i1e(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_i1e(self);
}
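// Structured linalg entry points follow. The *_ex variants (cholesky_ex,
// lu_factor_ex, ldl_factor_ex, inv_ex, _linalg_solve_ex) additionally return an
// info tensor and only raise when check_errors is true. Hedged usage sketch
// (illustration only):
//
//   auto [L, info] = at::linalg_cholesky_ex(A, /*upper=*/false, /*check_errors=*/false);
//   // info == 0 where the factorization succeeded.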
::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper, bool check_errors) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_cholesky_ex(self, upper, check_errors);
}
at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_cross(self, other, dim);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot, bool check_errors) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu_factor_ex(A, pivot, check_errors);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu(A, pivot);
}
at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_lu_solve(LU, pivots, B, left, adjoint);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det(const at::Tensor & A) {
    return wrapper_CompositeExplicitAutogradNonFunctional__linalg_det(A);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian, bool check_errors) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_ldl_factor_ex(self, hermitian, check_errors);
}
at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_ldl_solve(LD, pivots, B, hermitian);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A) {
    return wrapper_CompositeExplicitAutogradNonFunctional__linalg_slogdet(A);
}
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
    return wrapper_CompositeExplicitAutogradNonFunctional__linalg_eigh(A, UPLO, compute_v);
}
::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_inv_ex(A, check_errors);
}
at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_vector_norm(self, ord, dim, keepdim, dtype);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
    return wrapper_CompositeExplicitAutogradNonFunctional__linalg_svd(A, full_matrices, compute_uv, driver);
}
at::Tensor linalg_pinv(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
    return wrapper_CompositeExplicitAutogradNonFunctional_atol_rtol_tensor_linalg_pinv(self, atol, rtol, hermitian);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
    return wrapper_CompositeExplicitAutogradNonFunctional__linalg_solve_ex(A, B, left, check_errors);
}
::std::tuple<at::Tensor,at::Tensor> linalg_qr(const at::Tensor & A, c10::string_view mode) {
    return wrapper_CompositeExplicitAutogradNonFunctional_linalg_qr(A, mode);
}
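// The *_copy ops below are the functional (out-of-place) counterparts of view
// ops such as as_strided, diagonal, expand, permute, select, slice, split,
// squeeze, transpose, unbind and view; they return tensors that own their own
// storage, which is what the functionalization pass relies on. Hedged usage
// sketch (illustration only):
//
//   at::Tensor m = at::eye(3);
//   at::Tensor d = at::diagonal_copy(m, /*offset=*/0, /*dim1=*/0, /*dim2=*/1);
//   // mutating d leaves m untouched, unlike at::diagonal(m).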
at::Tensor _test_autograd_multiple_dispatch_view_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional___test_autograd_multiple_dispatch_view_copy(self);
}
at::Tensor _fw_primal_copy(const at::Tensor & self, int64_t level) {
    return wrapper_CompositeExplicitAutogradNonFunctional___fw_primal_copy(self, level);
}
at::Tensor _make_dual_copy(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    return wrapper_CompositeExplicitAutogradNonFunctional___make_dual_copy(primal, tangent, level);
}
at::Tensor view_as_real_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__view_as_real_copy(self);
}
at::Tensor view_as_complex_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__view_as_complex_copy(self);
}
at::Tensor _conj_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional___conj_copy(self);
}
at::Tensor _neg_view_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional___neg_view_copy(self);
}
at::Tensor as_strided_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) {
    return wrapper_CompositeExplicitAutogradNonFunctional__as_strided_copy(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
}
at::Tensor as_strided_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
    return wrapper_CompositeExplicitAutogradNonFunctional__as_strided_copy(self, size, stride, storage_offset);
}
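// For SymInt-native wrappers such as as_strided_copy above (and expand_copy,
// _reshape_alias_copy, slice_copy, split_with_sizes_copy and view_copy below),
// the plain IntArrayRef overload lifts its arguments with
// c10::fromIntArrayRefSlow (wrapping optional ints in c10::SymInt) so that both
// overloads funnel into the same implementation.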
at::Tensor _sparse_broadcast_to_copy(const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutogradNonFunctional___sparse_broadcast_to_copy(self, size);
}
at::Tensor diagonal_copy(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeExplicitAutogradNonFunctional__diagonal_copy(self, offset, dim1, dim2);
}
at::Tensor expand_copy(const at::Tensor & self, at::IntArrayRef size, bool implicit) {
    return wrapper_CompositeExplicitAutogradNonFunctional__expand_copy(self, c10::fromIntArrayRefSlow(size), implicit);
}
at::Tensor expand_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
    return wrapper_CompositeExplicitAutogradNonFunctional__expand_copy(self, size, implicit);
}
at::Tensor permute_copy(const at::Tensor & self, at::IntArrayRef dims) {
    return wrapper_CompositeExplicitAutogradNonFunctional__permute_copy(self, dims);
}
at::Tensor _reshape_alias_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
    return wrapper_CompositeExplicitAutogradNonFunctional___reshape_alias_copy(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
}
at::Tensor _reshape_alias_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    return wrapper_CompositeExplicitAutogradNonFunctional___reshape_alias_copy(self, size, stride);
}
at::Tensor select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
    return wrapper_CompositeExplicitAutogradNonFunctional_int_select_copy(self, dim, index);
}
at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
    return wrapper_CompositeExplicitAutogradNonFunctional_int_select_copy(self, dim, index);
}
at::Tensor detach_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__detach_copy(self);
}
at::Tensor slice_copy(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) {
    return wrapper_CompositeExplicitAutogradNonFunctional_Tensor_slice_copy(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
}
at::Tensor slice_copy_symint(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
    return wrapper_CompositeExplicitAutogradNonFunctional_Tensor_slice_copy(self, dim, start, end, step);
}
::std::vector<at::Tensor> split_copy(const at::Tensor & self, int64_t split_size, int64_t dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional_Tensor_split_copy(self, split_size, dim);
}
::std::vector<at::Tensor> split_copy_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional_Tensor_split_copy(self, split_size, dim);
}
::std::vector<at::Tensor> split_with_sizes_copy(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional__split_with_sizes_copy(self, c10::fromIntArrayRefSlow(split_sizes), dim);
}
::std::vector<at::Tensor> split_with_sizes_copy_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional__split_with_sizes_copy(self, split_sizes, dim);
}
at::Tensor squeeze_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__squeeze_copy(self);
}
at::Tensor squeeze_copy(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional_dim_squeeze_copy(self, dim);
}
at::Tensor squeeze_copy(const at::Tensor & self, at::IntArrayRef dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional_dims_squeeze_copy(self, dim);
}
at::Tensor t_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__t_copy(self);
}
at::Tensor transpose_copy(const at::Tensor & self, int64_t dim0, int64_t dim1) {
    return wrapper_CompositeExplicitAutogradNonFunctional_int_transpose_copy(self, dim0, dim1);
}
at::Tensor unsqueeze_copy(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional__unsqueeze_copy(self, dim);
}
at::Tensor _indices_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional___indices_copy(self);
}
at::Tensor _values_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional___values_copy(self);
}
at::Tensor indices_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__indices_copy(self);
}
at::Tensor values_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__values_copy(self);
}
at::Tensor crow_indices_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__crow_indices_copy(self);
}
at::Tensor col_indices_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__col_indices_copy(self);
}
at::Tensor ccol_indices_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__ccol_indices_copy(self);
}
at::Tensor row_indices_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__row_indices_copy(self);
}
::std::vector<at::Tensor> unbind_copy(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeExplicitAutogradNonFunctional_int_unbind_copy(self, dim);
}
at::Tensor view_copy(const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeExplicitAutogradNonFunctional__view_copy(self, c10::fromIntArrayRefSlow(size));
}
at::Tensor view_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
    return wrapper_CompositeExplicitAutogradNonFunctional__view_copy(self, size);
}
at::Tensor view_copy(const at::Tensor & self, at::ScalarType dtype) {
    return wrapper_CompositeExplicitAutogradNonFunctional_dtype_view_copy(self, dtype);
}
at::Tensor unfold_copy(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
    return wrapper_CompositeExplicitAutogradNonFunctional__unfold_copy(self, dimension, size, step);
}
at::Tensor alias_copy(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional__alias_copy(self);
}
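// Scalar special functions from the torch.special family follow: Airy Ai,
// Bessel functions of the first and second kind (j0, j1, y0, y1), modified and
// scaled-modified Bessel functions, spherical Bessel j0, and the
// Chebyshev/Hermite/Laguerre/Legendre polynomial ops, which evaluate the
// polynomial at points x for (broadcastable) orders n given as a tensor.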
at::Tensor special_airy_ai(const at::Tensor & x) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_airy_ai(x);
}
at::Tensor special_bessel_j0(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_j0(self);
}
at::Tensor special_bessel_j1(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_j1(self);
}
at::Tensor special_bessel_y0(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_y0(self);
}
at::Tensor special_bessel_y1(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_bessel_y1(self);
}
at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_t(x, n);
}
at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_u(x, n);
}
at::Tensor special_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_v(x, n);
}
at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_chebyshev_polynomial_w(x, n);
}
at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_hermite_polynomial_h(x, n);
}
at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_hermite_polynomial_he(x, n);
}
at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_laguerre_polynomial_l(x, n);
}
at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_legendre_polynomial_p(x, n);
}
at::Tensor special_modified_bessel_i0(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_i0(self);
}
at::Tensor special_modified_bessel_i1(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_i1(self);
}
at::Tensor special_modified_bessel_k0(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_k0(self);
}
at::Tensor special_modified_bessel_k1(const at::Tensor & self) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_modified_bessel_k1(self);
}
at::Tensor special_scaled_modified_bessel_k0(const at::Tensor & x) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_scaled_modified_bessel_k0(x);
}
at::Tensor special_scaled_modified_bessel_k1(const at::Tensor & x) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_scaled_modified_bessel_k1(x);
}
at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_t(x, n);
}
at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_u(x, n);
}
at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_v(x, n);
}
at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_shifted_chebyshev_polynomial_w(x, n);
}
at::Tensor special_spherical_bessel_j0(const at::Tensor & x) {
    return wrapper_CompositeExplicitAutogradNonFunctional_special_spherical_bessel_j0(x);
}
} // namespace compositeexplicitautogradnonfunctional
} // namespace at