#include <ATen/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h>

// @generated by torchgen/gen.py from Operators.cpp
// NOTE See [Sharded File] comment in VariableType

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
#include <ATen/ops/set_data.h>
#include <ATen/ops/_fw_primal.h>
#include <ATen/ops/align_tensors.h>
#include <ATen/ops/_assert_async.h>
#include <ATen/ops/_masked_scale.h>
#include <ATen/ops/_sobol_engine_draw.h>
#include <ATen/ops/_reshape_from_tensor.h>
#include <ATen/ops/alpha_dropout.h>
#include <ATen/ops/alpha_dropout.h>
#include <ATen/ops/view_as_real.h>
#include <ATen/ops/view_as_complex.h>
#include <ATen/ops/chalf.h>
#include <ATen/ops/conj_physical.h>
#include <ATen/ops/conj_physical.h>
#include <ATen/ops/conj_physical.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/atleast_1d.h>
#include <ATen/ops/atleast_1d.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/logical_xor.h>
#include <ATen/ops/logical_xor.h>
#include <ATen/ops/logical_xor.h>
#include <ATen/ops/broadcast_to.h>
#include <ATen/ops/constant_pad_nd.h>
#include <ATen/ops/contiguous.h>
#include <ATen/ops/convolution_backward.h>
#include <ATen/ops/convolution_overrideable.h>
#include <ATen/ops/_convolution_double_backward.h>
#include <ATen/ops/conv2d.h>
#include <ATen/ops/conv2d.h>
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/corrcoef.h>
#include <ATen/ops/cudnn_batch_norm.h>
#include <ATen/ops/_mps_convolution_transpose.h>
#include <ATen/ops/mps_convolution_transpose_backward.h>
#include <ATen/ops/cummaxmin_backward.h>
#include <ATen/ops/cumprod_backward.h>
#include <ATen/ops/fill_diagonal.h>
#include <ATen/ops/embedding.h>
#include <ATen/ops/_rowwise_prune.h>
#include <ATen/ops/row_stack.h>
#include <ATen/ops/row_stack.h>
#include <ATen/ops/_embedding_bag_backward.h>
#include <ATen/ops/_embedding_bag_dense_backward.h>
#include <ATen/ops/resize.h>
#include <ATen/ops/erfc.h>
#include <ATen/ops/erfc.h>
#include <ATen/ops/erfc.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full_like.h>
#include <ATen/ops/grid_sampler_2d.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/_fft_c2r.h>
#include <ATen/ops/_fft_c2r.h>
#include <ATen/ops/_cufft_set_plan_cache_max_size.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/instance_norm.h>
#include <ATen/ops/isclose.h>
#include <ATen/ops/is_floating_point.h>
#include <ATen/ops/is_complex.h>
#include <ATen/ops/is_same_size.h>
#include <ATen/ops/kl_div.h>
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16.h>
#include <ATen/ops/margin_ranking_loss.h>
#include <ATen/ops/matmul.h>
#include <ATen/ops/matmul_backward.h>
#include <ATen/ops/matmul.h>
#include <ATen/ops/matrix_exp.h>
#include <ATen/ops/_compute_linear_combination.h>
#include <ATen/ops/_compute_linear_combination.h>
#include <ATen/ops/mkldnn_max_pool2d_backward.h>
#include <ATen/ops/max_pool3d.h>
#include <ATen/ops/median.h>
#include <ATen/ops/median.h>
#include <ATen/ops/median.h>
#include <ATen/ops/median.h>
#include <ATen/ops/median.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/miopen_batch_norm.h>
#include <ATen/ops/miopen_convolution_transpose.h>
#include <ATen/ops/miopen_convolution_add_relu.h>
#include <ATen/ops/miopen_rnn_backward.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/batch_norm_elemt.h>
#include <ATen/ops/batch_norm_elemt.h>
#include <ATen/ops/cdist.h>
#include <ATen/ops/mT.h>
#include <ATen/ops/adjoint.h>
#include <ATen/ops/channel_shuffle.h>
#include <ATen/ops/poisson_nll_loss.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/negative.h>
#include <ATen/ops/negative.h>
#include <ATen/ops/negative.h>
#include <ATen/ops/_reshape_copy.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/infinitely_differentiable_gelu_backward.h>
#include <ATen/ops/hardshrink_backward.h>
#include <ATen/ops/hardshrink_backward.h>
#include <ATen/ops/sinc.h>
#include <ATen/ops/sinc.h>
#include <ATen/ops/sinc.h>
#include <ATen/ops/slice.h>
#include <ATen/ops/select_scatter.h>
#include <ATen/ops/smm.h>
#include <ATen/ops/unsafe_split_with_sizes.h>
#include <ATen/ops/dstack.h>
#include <ATen/ops/dstack.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/trapezoid.h>
#include <ATen/ops/trapezoid.h>
#include <ATen/ops/_nested_tensor_from_mask.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned.h>
#include <ATen/ops/_nested_tensor_size.h>
#include <ATen/ops/_nested_view_from_buffer_copy.h>
#include <ATen/ops/unique_dim_consecutive.h>
#include <ATen/ops/_unsafe_view.h>
#include <ATen/ops/unsqueeze.h>
#include <ATen/ops/unsqueeze.h>
#include <ATen/ops/_efficientzerotensor.h>
#include <ATen/ops/poisson.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/heaviside.h>
#include <ATen/ops/heaviside.h>
#include <ATen/ops/heaviside.h>
#include <ATen/ops/_addmm_activation.h>
#include <ATen/ops/_addmm_activation.h>
#include <ATen/ops/sparse_compressed_tensor.h>
#include <ATen/ops/sparse_bsr_tensor.h>
#include <ATen/ops/sparse_compressed_tensor.h>
#include <ATen/ops/sparse_bsr_tensor.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/_validate_sparse_compressed_tensor_args.h>
#include <ATen/ops/sparse_resize_and_clear.h>
#include <ATen/ops/to_dense.h>
#include <ATen/ops/sparse_dim.h>
#include <ATen/ops/_dimI.h>
#include <ATen/ops/_nnz.h>
#include <ATen/ops/ccol_indices.h>
#include <ATen/ops/to_sparse_csr.h>
#include <ATen/ops/to_sparse_bsr.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight.h>
#include <ATen/ops/q_scale.h>
#include <ATen/ops/q_per_channel_axis.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor.h>
#include <ATen/ops/_make_per_channel_quantized_tensor.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h>
#include <ATen/ops/_saturate_weight_to_fp16.h>
#include <ATen/ops/_autocast_to_reduced_precision.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward.h>
#include <ATen/ops/lstm_cell.h>
#include <ATen/ops/quantized_rnn_relu_cell.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_scatter.h>
#include <ATen/ops/masked_scatter.h>
#include <ATen/ops/_masked_softmax_backward.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/diag.h>
#include <ATen/ops/diag.h>
#include <ATen/ops/triu_indices.h>
#include <ATen/ops/trace.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/take.h>
#include <ATen/ops/take.h>
#include <ATen/ops/index_select_backward.h>
#include <ATen/ops/argwhere.h>
#include <ATen/ops/svd.h>
#include <ATen/ops/svd.h>
#include <ATen/ops/geqrf.h>
#include <ATen/ops/geqrf.h>
#include <ATen/ops/orgqr.h>
#include <ATen/ops/orgqr.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/signbit.h>
#include <ATen/ops/signbit.h>
#include <ATen/ops/dist.h>
#include <ATen/ops/_histogramdd_from_bin_cts.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/renorm.h>
#include <ATen/ops/renorm.h>
#include <ATen/ops/renorm.h>
#include <ATen/ops/unfold.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_abs.h>
#include <ATen/ops/_foreach_abs.h>
#include <ATen/ops/_foreach_expm1.h>
#include <ATen/ops/_foreach_expm1.h>
#include <ATen/ops/_foreach_log10.h>
#include <ATen/ops/_foreach_log10.h>
#include <ATen/ops/_foreach_tan.h>
#include <ATen/ops/_foreach_tan.h>
#include <ATen/ops/_foreach_sinh.h>
#include <ATen/ops/_foreach_sinh.h>
#include <ATen/ops/searchsorted.h>
#include <ATen/ops/searchsorted.h>
#include <ATen/ops/searchsorted.h>
#include <ATen/ops/smooth_l1_loss.h>
#include <ATen/ops/smooth_l1_loss.h>
#include <ATen/ops/elu.h>
#include <ATen/ops/elu.h>
#include <ATen/ops/elu.h>
#include <ATen/ops/glu_backward.h>
#include <ATen/ops/glu_backward.h>
#include <ATen/ops/hardtanh_backward.h>
#include <ATen/ops/hardtanh_backward.h>
#include <ATen/ops/leaky_relu_backward.h>
#include <ATen/ops/leaky_relu_backward.h>
#include <ATen/ops/softplus.h>
#include <ATen/ops/softplus.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d.h>
#include <ATen/ops/_adaptive_avg_pool2d.h>
#include <ATen/ops/avg_pool3d.h>
#include <ATen/ops/avg_pool3d.h>
#include <ATen/ops/avg_pool3d_backward.h>
#include <ATen/ops/avg_pool3d_backward.h>
#include <ATen/ops/max_pool2d_with_indices_backward.h>
#include <ATen/ops/max_pool2d_with_indices_backward.h>
#include <ATen/ops/max_pool3d_with_indices.h>
#include <ATen/ops/max_pool3d_with_indices.h>
#include <ATen/ops/reflection_pad2d.h>
#include <ATen/ops/reflection_pad2d.h>
#include <ATen/ops/_upsample_bilinear2d_aa.h>
#include <ATen/ops/upsample_linear1d_backward.h>
#include <ATen/ops/upsample_linear1d_backward.h>
#include <ATen/ops/_upsample_bilinear2d_aa.h>
#include <ATen/ops/_upsample_bilinear2d_aa.h>
#include <ATen/ops/upsample_nearest1d_backward.h>
#include <ATen/ops/upsample_nearest1d_backward.h>
#include <ATen/ops/upsample_nearest2d_backward.h>
#include <ATen/ops/upsample_nearest2d_backward.h>
#include <ATen/ops/slow_conv_transpose3d.h>
#include <ATen/ops/slow_conv_transpose3d.h>
#include <ATen/ops/slow_conv3d_forward.h>
#include <ATen/ops/slow_conv3d_forward.h>
#include <ATen/ops/im2col.h>
#include <ATen/ops/im2col.h>
#include <ATen/ops/isneginf.h>
#include <ATen/ops/isneginf.h>
#include <ATen/ops/_add_batch_dim.h>
#include <ATen/ops/special_psi.h>
#include <ATen/ops/special_psi.h>
#include <ATen/ops/special_erfcx.h>
#include <ATen/ops/special_erfcx.h>
#include <ATen/ops/special_i0e.h>
#include <ATen/ops/special_i0e.h>
#include <ATen/ops/special_i1.h>
#include <ATen/ops/special_i1.h>
#include <ATen/ops/special_logit.h>
#include <ATen/ops/special_logit.h>
#include <ATen/ops/special_log_softmax.h>
#include <ATen/ops/special_gammaincc.h>
#include <ATen/ops/special_gammaincc.h>
#include <ATen/ops/special_multigammaln.h>
#include <ATen/ops/special_multigammaln.h>
#include <ATen/ops/fft_fft2.h>
#include <ATen/ops/fft_fft2.h>
#include <ATen/ops/fft_fftn.h>
#include <ATen/ops/fft_fftn.h>
#include <ATen/ops/fft_fftshift.h>
#include <ATen/ops/linalg_lu_factor.h>
#include <ATen/ops/linalg_lu_factor.h>
#include <ATen/ops/linalg_lu_solve.h>
#include <ATen/ops/linalg_lu_solve.h>
#include <ATen/ops/linalg_det.h>
#include <ATen/ops/linalg_det.h>
#include <ATen/ops/_linalg_slogdet.h>
#include <ATen/ops/_linalg_slogdet.h>
#include <ATen/ops/linalg_inv.h>
#include <ATen/ops/linalg_inv.h>
#include <ATen/ops/outer.h>
#include <ATen/ops/outer.h>
#include <ATen/ops/ger.h>
#include <ATen/ops/ger.h>
#include <ATen/ops/_linalg_svd.h>
#include <ATen/ops/_linalg_svd.h>
#include <ATen/ops/_linalg_solve_ex.h>
#include <ATen/ops/_linalg_solve_ex.h>
#include <ATen/ops/linalg_qr.h>
#include <ATen/ops/linalg_qr.h>
#include <ATen/ops/nested_to_padded_tensor.h>
#include <ATen/ops/_test_warn_in_autograd.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view.h>
#include <ATen/ops/diagonal_copy.h>
#include <ATen/ops/permute_copy.h>
#include <ATen/ops/select_copy.h>
#include <ATen/ops/slice_copy.h>
#include <ATen/ops/split_with_sizes_copy.h>
#include <ATen/ops/t_copy.h>
#include <ATen/ops/col_indices_copy.h>
#include <ATen/ops/unbind_copy.h>
#include <ATen/ops/unbind_copy.h>
#include <ATen/ops/split_with_sizes_copy.h>
#include <ATen/ops/alias_copy.h>
#include <ATen/ops/_scaled_dot_product_attention_math.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_backward.h>
#include <ATen/ops/_triton_scaled_dot_attention.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_scaled_modified_bessel_k1.h>
#include <ATen/ops/special_scaled_modified_bessel_k1.h>
#include <ATen/ops/_foobar.h>
#include <ATen/ops/_masked_scale.h>
#include <ATen/ops/constant_pad_nd.h>
#include <ATen/ops/convolution_backward.h>
#include <ATen/ops/convolution_overrideable.h>
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/cudnn_batch_norm.h>
#include <ATen/ops/_mps_convolution_transpose.h>
#include <ATen/ops/mps_convolution_transpose_backward.h>
#include <ATen/ops/embedding.h>
#include <ATen/ops/_embedding_bag_dense_backward.h>
#include <ATen/ops/resize.h>
#include <ATen/ops/resize.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full_like.h>
#include <ATen/ops/grid_sampler_2d.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/matmul_backward.h>
#include <ATen/ops/mkldnn_max_pool2d_backward.h>
#include <ATen/ops/median.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/miopen_batch_norm.h>
#include <ATen/ops/miopen_convolution_transpose.h>
#include <ATen/ops/miopen_rnn_backward.h>
#include <ATen/ops/channel_shuffle.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/select_scatter.h>
#include <ATen/ops/unsafe_split_with_sizes.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/_nested_tensor_from_mask.h>
#include <ATen/ops/_nested_tensor_size.h>
#include <ATen/ops/_nested_view_from_buffer_copy.h>
#include <ATen/ops/unique_dim_consecutive.h>
#include <ATen/ops/_unsafe_view.h>
#include <ATen/ops/_efficientzerotensor.h>
#include <ATen/ops/poisson.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/sparse_resize_and_clear.h>
#include <ATen/ops/sparse_resize_and_clear.h>
#include <ATen/ops/to_sparse_csr.h>
#include <ATen/ops/to_sparse_bsr.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor.h>
#include <ATen/ops/_make_per_channel_quantized_tensor.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_scatter.h>
#include <ATen/ops/_masked_softmax_backward.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/triu_indices.h>
#include <ATen/ops/trace.h>
#include <ATen/ops/dist.h>
#include <ATen/ops/_histogramdd_from_bin_cts.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_abs.h>
#include <ATen/ops/_foreach_expm1.h>
#include <ATen/ops/_foreach_log10.h>
#include <ATen/ops/_foreach_tan.h>
#include <ATen/ops/_foreach_sinh.h>
#include <ATen/ops/searchsorted.h>
#include <ATen/ops/_adaptive_avg_pool2d.h>
#include <ATen/ops/_test_warn_in_autograd.h>
#include <ATen/ops/diagonal_copy.h>
#include <ATen/ops/permute_copy.h>
#include <ATen/ops/select_copy.h>
#include <ATen/ops/slice_copy.h>
#include <ATen/ops/t_copy.h>
#include <ATen/ops/col_indices_copy.h>
#include <ATen/ops/alias_copy.h>
#include <ATen/ops/_triton_scaled_dot_attention.h>
#include <ATen/ops/_foobar.h>
#endif



namespace at { namespace _ops {

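// Each operator below follows the same generated pattern: out-of-line
// definitions of its name / overload_name / schema_str constants, a
// C10_NOINLINE factory that resolves a TypedOperatorHandle through the
// c10::Dispatcher singleton, and call() / redispatch() wrappers that cache
// that handle in a function-local static (initialized once, thread-safely,
// on first use).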
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(set_data, name, "aten::set_data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(set_data, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(set_data, schema_str, "set_data(Tensor(a!) self, Tensor new_data) -> ()")

// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<set_data::schema> create_set_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(set_data::name, set_data::overload_name)
      .typed<set_data::schema>();
}

// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
void set_data::call(at::Tensor & self, const at::Tensor & new_data) {

    static auto op = create_set_data_typed_handle();
    return op.call(self, new_data);
}

// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
void set_data::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & new_data) {

    static auto op = create_set_data_typed_handle();
    return op.redispatch(dispatchKeySet, self, new_data);
}
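
// Illustrative usage sketch (not part of the generated file): client code
// reaches the wrapper above through the dispatcher rather than calling a
// kernel directly. The helper name below is hypothetical.
//
//   void example_set_data(at::Tensor & t, const at::Tensor & new_data) {
//     at::_ops::set_data::call(t, new_data);  // resolves "aten::set_data" via c10::Dispatcher
//   }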

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal, name, "aten::_fw_primal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal, schema_str, "_fw_primal(Tensor(a) self, int level) -> Tensor(a)")

// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_fw_primal::schema> create__fw_primal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fw_primal::name, _fw_primal::overload_name)
      .typed<_fw_primal::schema>();
}

// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
at::Tensor _fw_primal::call(const at::Tensor & self, int64_t level) {

    static auto op = create__fw_primal_typed_handle();
    return op.call(self, level);
}

// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
at::Tensor _fw_primal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) {

    static auto op = create__fw_primal_typed_handle();
    return op.redispatch(dispatchKeySet, self, level);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_tensors, name, "aten::align_tensors")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_tensors, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_tensors, schema_str, "align_tensors(Tensor[] tensors) -> Tensor[]")

// aten::align_tensors(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<align_tensors::schema> create_align_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(align_tensors::name, align_tensors::overload_name)
      .typed<align_tensors::schema>();
}

// aten::align_tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> align_tensors::call(at::TensorList tensors) {

    static auto op = create_align_tensors_typed_handle();
    return op.call(tensors);
}

// aten::align_tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> align_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {

    static auto op = create_align_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_assert_async, name, "aten::_assert_async")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_assert_async, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_assert_async, schema_str, "_assert_async(Tensor self) -> ()")

// aten::_assert_async(Tensor self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_assert_async::schema> create__assert_async_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_assert_async::name, _assert_async::overload_name)
      .typed<_assert_async::schema>();
}

// aten::_assert_async(Tensor self) -> ()
void _assert_async::call(const at::Tensor & self) {

    static auto op = create__assert_async_typed_handle();
    return op.call(self);
}

// aten::_assert_async(Tensor self) -> ()
void _assert_async::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__assert_async_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_scale, name, "aten::_masked_scale")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_scale, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_scale, schema_str, "_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor")

// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_masked_scale::schema> create__masked_scale_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_masked_scale::name, _masked_scale::overload_name)
      .typed<_masked_scale::schema>();
}

// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
at::Tensor _masked_scale::call(const at::Tensor & self, const at::Tensor & mask, double scale) {

    static auto op = create__masked_scale_typed_handle();
    return op.call(self, mask, scale);
}

// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
at::Tensor _masked_scale::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale) {

    static auto op = create__masked_scale_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, scale);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_draw, name, "aten::_sobol_engine_draw")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_draw, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_draw, schema_str, "_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)")

// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_sobol_engine_draw::schema> create__sobol_engine_draw_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sobol_engine_draw::name, _sobol_engine_draw::overload_name)
      .typed<_sobol_engine_draw::schema>();
}

// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw::call(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {

    static auto op = create__sobol_engine_draw_typed_handle();
    return op.call(quasi, n, sobolstate, dimension, num_generated, dtype);
}

// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {

    static auto op = create__sobol_engine_draw_typed_handle();
    return op.redispatch(dispatchKeySet, quasi, n, sobolstate, dimension, num_generated, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_from_tensor, name, "aten::_reshape_from_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_from_tensor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_from_tensor, schema_str, "_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor")

// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_from_tensor::schema> create__reshape_from_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_reshape_from_tensor::name, _reshape_from_tensor::overload_name)
      .typed<_reshape_from_tensor::schema>();
}

// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
at::Tensor _reshape_from_tensor::call(const at::Tensor & self, const at::Tensor & shape) {

    static auto op = create__reshape_from_tensor_typed_handle();
    return op.call(self, shape);
}

// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
at::Tensor _reshape_from_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & shape) {

    static auto op = create__reshape_from_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, shape);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alpha_dropout, name, "aten::alpha_dropout")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alpha_dropout, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alpha_dropout, schema_str, "alpha_dropout(Tensor input, float p, bool train) -> Tensor")

// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<alpha_dropout::schema> create_alpha_dropout_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(alpha_dropout::name, alpha_dropout::overload_name)
      .typed<alpha_dropout::schema>();
}

// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor alpha_dropout::call(const at::Tensor & input, double p, bool train) {

    static auto op = create_alpha_dropout_typed_handle();
    return op.call(input, p, train);
}

// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor alpha_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) {

    static auto op = create_alpha_dropout_typed_handle();
    return op.redispatch(dispatchKeySet, input, p, train);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alpha_dropout_, name, "aten::alpha_dropout_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alpha_dropout_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alpha_dropout_, schema_str, "alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)")

// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<alpha_dropout_::schema> create_alpha_dropout__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(alpha_dropout_::name, alpha_dropout_::overload_name)
      .typed<alpha_dropout_::schema>();
}

// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & alpha_dropout_::call(at::Tensor & self, double p, bool train) {

    static auto op = create_alpha_dropout__typed_handle();
    return op.call(self, p, train);
}

// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & alpha_dropout_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) {

    static auto op = create_alpha_dropout__typed_handle();
    return op.redispatch(dispatchKeySet, self, p, train);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real, name, "aten::view_as_real")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real, schema_str, "view_as_real(Tensor(a) self) -> Tensor(a)")

// aten::view_as_real(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<view_as_real::schema> create_view_as_real_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_real::name, view_as_real::overload_name)
      .typed<view_as_real::schema>();
}

// aten::view_as_real(Tensor(a) self) -> Tensor(a)
at::Tensor view_as_real::call(const at::Tensor & self) {

    static auto op = create_view_as_real_typed_handle();
    return op.call(self);
}

// aten::view_as_real(Tensor(a) self) -> Tensor(a)
at::Tensor view_as_real::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_view_as_real_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex, name, "aten::view_as_complex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex, schema_str, "view_as_complex(Tensor(a) self) -> Tensor(a)")

// aten::view_as_complex(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<view_as_complex::schema> create_view_as_complex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_complex::name, view_as_complex::overload_name)
      .typed<view_as_complex::schema>();
}

// aten::view_as_complex(Tensor(a) self) -> Tensor(a)
at::Tensor view_as_complex::call(const at::Tensor & self) {

    static auto op = create_view_as_complex_typed_handle();
    return op.call(self);
}

// aten::view_as_complex(Tensor(a) self) -> Tensor(a)
at::Tensor view_as_complex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_view_as_complex_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chalf, name, "aten::chalf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chalf, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chalf, schema_str, "chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor")

// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<chalf::schema> create_chalf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(chalf::name, chalf::overload_name)
      .typed<chalf::schema>();
}

// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor chalf::call(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_chalf_typed_handle();
    return op.call(self, memory_format);
}

// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor chalf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_chalf_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical, name, "aten::conj_physical")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical, schema_str, "conj_physical(Tensor self) -> Tensor")

// aten::conj_physical(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conj_physical::schema> create_conj_physical_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conj_physical::name, conj_physical::overload_name)
      .typed<conj_physical::schema>();
}

// aten::conj_physical(Tensor self) -> Tensor
at::Tensor conj_physical::call(const at::Tensor & self) {

    static auto op = create_conj_physical_typed_handle();
    return op.call(self);
}

// aten::conj_physical(Tensor self) -> Tensor
at::Tensor conj_physical::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_conj_physical_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical_out, name, "aten::conj_physical")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical_out, schema_str, "conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<conj_physical_out::schema> create_conj_physical_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conj_physical_out::name, conj_physical_out::overload_name)
      .typed<conj_physical_out::schema>();
}

// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & conj_physical_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_conj_physical_out_typed_handle();
    return op.call(self, out);
}

// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & conj_physical_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_conj_physical_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}
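
// Naming convention note: the C++ symbol conj_physical_out above maps to the
// schema overload "conj_physical.out"; its trailing Tensor(a!) out argument
// is written in place and returned by reference.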

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical_, name, "aten::conj_physical_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj_physical_, schema_str, "conj_physical_(Tensor(a!) self) -> Tensor(a!)")

// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<conj_physical_::schema> create_conj_physical__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conj_physical_::name, conj_physical_::overload_name)
      .typed<conj_physical_::schema>();
}

// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & conj_physical_::call(at::Tensor & self) {

    static auto op = create_conj_physical__typed_handle();
    return op.call(self);
}

// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & conj_physical_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_conj_physical__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos, name, "aten::acos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos, schema_str, "acos(Tensor self) -> Tensor")

// aten::acos(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<acos::schema> create_acos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acos::name, acos::overload_name)
      .typed<acos::schema>();
}

// aten::acos(Tensor self) -> Tensor
at::Tensor acos::call(const at::Tensor & self) {

    static auto op = create_acos_typed_handle();
    return op.call(self);
}

// aten::acos(Tensor self) -> Tensor
at::Tensor acos::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_acos_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos_, name, "aten::acos_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos_, schema_str, "acos_(Tensor(a!) self) -> Tensor(a!)")

// aten::acos_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<acos_::schema> create_acos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acos_::name, acos_::overload_name)
      .typed<acos_::schema>();
}

// aten::acos_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & acos_::call(at::Tensor & self) {

    static auto op = create_acos__typed_handle();
    return op.call(self);
}

// aten::acos_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & acos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_acos__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos_out, name, "aten::acos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acos_out, schema_str, "acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<acos_out::schema> create_acos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acos_out::name, acos_out::overload_name)
      .typed<acos_out::schema>();
}

// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & acos_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_acos_out_typed_handle();
    return op.call(self, out);
}

// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & acos_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_acos_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos, name, "aten::arccos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos, schema_str, "arccos(Tensor self) -> Tensor")

// aten::arccos(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arccos::schema> create_arccos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arccos::name, arccos::overload_name)
      .typed<arccos::schema>();
}

// aten::arccos(Tensor self) -> Tensor
at::Tensor arccos::call(const at::Tensor & self) {

    static auto op = create_arccos_typed_handle();
    return op.call(self);
}

// aten::arccos(Tensor self) -> Tensor
at::Tensor arccos::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_arccos_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos_, name, "aten::arccos_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos_, schema_str, "arccos_(Tensor(a!) self) -> Tensor(a!)")

// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arccos_::schema> create_arccos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arccos_::name, arccos_::overload_name)
      .typed<arccos_::schema>();
}

// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arccos_::call(at::Tensor & self) {

    static auto op = create_arccos__typed_handle();
    return op.call(self);
}

// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arccos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_arccos__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos_out, name, "aten::arccos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccos_out, schema_str, "arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arccos_out::schema> create_arccos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arccos_out::name, arccos_out::overload_name)
      .typed<arccos_out::schema>();
}

// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arccos_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_arccos_out_typed_handle();
    return op.call(self, out);
}

// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arccos_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_arccos_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dim, name, "aten::any")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dim, schema_str, "any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor")

// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<any_dim::schema> create_any_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_dim::name, any_dim::overload_name)
      .typed<any_dim::schema>();
}

// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
at::Tensor any_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_any_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
at::Tensor any_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_any_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_out, name, "aten::any")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_out, schema_str, "any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<any_out::schema> create_any_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_out::name, any_out::overload_name)
      .typed<any_out::schema>();
}

// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_out::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {

    static auto op = create_any_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {

    static auto op = create_any_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dimname, name, "aten::any")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dimname, schema_str, "any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor")

// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<any_dimname::schema> create_any_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_dimname::name, any_dimname::overload_name)
      .typed<any_dimname::schema>();
}

// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
at::Tensor any_dimname::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_any_dimname_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
at::Tensor any_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_any_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dimname_out, name, "aten::any")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_dimname_out, schema_str, "any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<any_dimname_out::schema> create_any_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_dimname_out::name, any_dimname_out::overload_name)
      .typed<any_dimname_out::schema>();
}

// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_dimname_out::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {

    static auto op = create_any_dimname_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {

    static auto op = create_any_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}
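
// The four "aten::any" variants above (dim, out, dimname, dimname_out) share
// one operator name and are disambiguated purely by overload_name when
// findSchemaOrThrow queries the dispatcher.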
1144
1145STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh, name, "aten::arccosh")
1146STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh, overload_name, "")
1147STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh, schema_str, "arccosh(Tensor self) -> Tensor")
1148
1149// aten::arccosh(Tensor self) -> Tensor
1150static C10_NOINLINE c10::TypedOperatorHandle<arccosh::schema> create_arccosh_typed_handle() {
1151 return c10::Dispatcher::singleton()
1152 .findSchemaOrThrow(arccosh::name, arccosh::overload_name)
1153 .typed<arccosh::schema>();
1154}
1155
1156// aten::arccosh(Tensor self) -> Tensor
1157at::Tensor arccosh::call(const at::Tensor & self) {
1158
1159 static auto op = create_arccosh_typed_handle();
1160 return op.call(self);
1161}
1162
1163// aten::arccosh(Tensor self) -> Tensor
1164at::Tensor arccosh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
1165
1166 static auto op = create_arccosh_typed_handle();
1167 return op.redispatch(dispatchKeySet, self);
1168}
1169
1170STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh_, name, "aten::arccosh_")
1171STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh_, overload_name, "")
1172STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh_, schema_str, "arccosh_(Tensor(a!) self) -> Tensor(a!)")
1173
1174// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
1175static C10_NOINLINE c10::TypedOperatorHandle<arccosh_::schema> create_arccosh__typed_handle() {
1176 return c10::Dispatcher::singleton()
1177 .findSchemaOrThrow(arccosh_::name, arccosh_::overload_name)
1178 .typed<arccosh_::schema>();
1179}
1180
1181// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
1182at::Tensor & arccosh_::call(at::Tensor & self) {
1183
1184 static auto op = create_arccosh__typed_handle();
1185 return op.call(self);
1186}
1187
1188// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
1189at::Tensor & arccosh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
1190
1191 static auto op = create_arccosh__typed_handle();
1192 return op.redispatch(dispatchKeySet, self);
1193}
1194
1195STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh_out, name, "aten::arccosh")
1196STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh_out, overload_name, "out")
1197STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arccosh_out, schema_str, "arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
1198
1199// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1200static C10_NOINLINE c10::TypedOperatorHandle<arccosh_out::schema> create_arccosh_out_typed_handle() {
1201 return c10::Dispatcher::singleton()
1202 .findSchemaOrThrow(arccosh_out::name, arccosh_out::overload_name)
1203 .typed<arccosh_out::schema>();
1204}
1205
1206// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1207at::Tensor & arccosh_out::call(const at::Tensor & self, at::Tensor & out) {
1208
1209 static auto op = create_arccosh_out_typed_handle();
1210 return op.call(self, out);
1211}
1212
1213// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1214at::Tensor & arccosh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
1215
1216 static auto op = create_arccosh_out_typed_handle();
1217 return op.redispatch(dispatchKeySet, self, out);
1218}
1219
1220STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin, name, "aten::asin")
1221STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin, overload_name, "")
1222STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin, schema_str, "asin(Tensor self) -> Tensor")
1223
1224// aten::asin(Tensor self) -> Tensor
1225static C10_NOINLINE c10::TypedOperatorHandle<asin::schema> create_asin_typed_handle() {
1226 return c10::Dispatcher::singleton()
1227 .findSchemaOrThrow(asin::name, asin::overload_name)
1228 .typed<asin::schema>();
1229}
1230
1231// aten::asin(Tensor self) -> Tensor
1232at::Tensor asin::call(const at::Tensor & self) {
1233
1234 static auto op = create_asin_typed_handle();
1235 return op.call(self);
1236}
1237
1238// aten::asin(Tensor self) -> Tensor
1239at::Tensor asin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
1240
1241 static auto op = create_asin_typed_handle();
1242 return op.redispatch(dispatchKeySet, self);
1243}
1244
1245STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin_, name, "aten::asin_")
1246STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin_, overload_name, "")
1247STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin_, schema_str, "asin_(Tensor(a!) self) -> Tensor(a!)")
1248
1249// aten::asin_(Tensor(a!) self) -> Tensor(a!)
1250static C10_NOINLINE c10::TypedOperatorHandle<asin_::schema> create_asin__typed_handle() {
1251 return c10::Dispatcher::singleton()
1252 .findSchemaOrThrow(asin_::name, asin_::overload_name)
1253 .typed<asin_::schema>();
1254}
1255
1256// aten::asin_(Tensor(a!) self) -> Tensor(a!)
1257at::Tensor & asin_::call(at::Tensor & self) {
1258
1259 static auto op = create_asin__typed_handle();
1260 return op.call(self);
1261}
1262
1263// aten::asin_(Tensor(a!) self) -> Tensor(a!)
1264at::Tensor & asin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
1265
1266 static auto op = create_asin__typed_handle();
1267 return op.redispatch(dispatchKeySet, self);
1268}
1269
1270STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin_out, name, "aten::asin")
1271STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin_out, overload_name, "out")
1272STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(asin_out, schema_str, "asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
1273
1274// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1275static C10_NOINLINE c10::TypedOperatorHandle<asin_out::schema> create_asin_out_typed_handle() {
1276 return c10::Dispatcher::singleton()
1277 .findSchemaOrThrow(asin_out::name, asin_out::overload_name)
1278 .typed<asin_out::schema>();
1279}
1280
1281// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1282at::Tensor & asin_out::call(const at::Tensor & self, at::Tensor & out) {
1283
1284 static auto op = create_asin_out_typed_handle();
1285 return op.call(self, out);
1286}
1287
1288// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1289at::Tensor & asin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
1290
1291 static auto op = create_asin_out_typed_handle();
1292 return op.redispatch(dispatchKeySet, self, out);
1293}
1294
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_1d, name, "aten::atleast_1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_1d, schema_str, "atleast_1d(Tensor self) -> Tensor")

// aten::atleast_1d(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atleast_1d::schema> create_atleast_1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atleast_1d::name, atleast_1d::overload_name)
      .typed<atleast_1d::schema>();
}

// aten::atleast_1d(Tensor self) -> Tensor
at::Tensor atleast_1d::call(const at::Tensor & self) {

    static auto op = create_atleast_1d_typed_handle();
    return op.call(self);
}

// aten::atleast_1d(Tensor self) -> Tensor
at::Tensor atleast_1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_atleast_1d_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_1d_Sequence, name, "aten::atleast_1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_1d_Sequence, overload_name, "Sequence")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_1d_Sequence, schema_str, "atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]")

// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<atleast_1d_Sequence::schema> create_atleast_1d_Sequence_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atleast_1d_Sequence::name, atleast_1d_Sequence::overload_name)
      .typed<atleast_1d_Sequence::schema>();
}

// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_1d_Sequence::call(at::TensorList tensors) {

    static auto op = create_atleast_1d_Sequence_typed_handle();
    return op.call(tensors);
}

// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_1d_Sequence::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {

    static auto op = create_atleast_1d_Sequence_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

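// NOTE [illustrative sketch, not generated code]: "atleast_1d" and
// "atleast_1d.Sequence" share one operator name and are told apart purely by
// overload_name, which is why findSchemaOrThrow takes both strings. Resolving
// the list overload by hand would look roughly like this:
#if 0
static auto seq_op = c10::Dispatcher::singleton()
    .findSchemaOrThrow("aten::atleast_1d", /*overload_name=*/"Sequence")
    .typed<::std::vector<at::Tensor> (at::TensorList)>();
#endif
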
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_out, name, "aten::copysign")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_out, schema_str, "copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copysign_out::schema> create_copysign_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign_out::name, copysign_out::overload_name)
      .typed<copysign_out::schema>();
}

// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copysign_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_copysign_out_typed_handle();
    return op.call(self, other, out);
}

// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copysign_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_copysign_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Tensor, name, "aten::copysign")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Tensor, schema_str, "copysign.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<copysign_Tensor::schema> create_copysign_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign_Tensor::name, copysign_Tensor::overload_name)
      .typed<copysign_Tensor::schema>();
}

// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor copysign_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_copysign_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor copysign_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_copysign_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign__Tensor, name, "aten::copysign_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign__Tensor, schema_str, "copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copysign__Tensor::schema> create_copysign__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign__Tensor::name, copysign__Tensor::overload_name)
      .typed<copysign__Tensor::schema>();
}

// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & copysign__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_copysign__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & copysign__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_copysign__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Scalar, name, "aten::copysign")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Scalar, schema_str, "copysign.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<copysign_Scalar::schema> create_copysign_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign_Scalar::name, copysign_Scalar::overload_name)
      .typed<copysign_Scalar::schema>();
}

// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor copysign_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_copysign_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor copysign_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_copysign_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign__Scalar, name, "aten::copysign_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign__Scalar, schema_str, "copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copysign__Scalar::schema> create_copysign__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign__Scalar::name, copysign__Scalar::overload_name)
      .typed<copysign__Scalar::schema>();
}

// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & copysign__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_copysign__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & copysign__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_copysign__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Scalar_out, name, "aten::copysign")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copysign_Scalar_out, schema_str, "copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copysign_Scalar_out::schema> create_copysign_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign_Scalar_out::name, copysign_Scalar_out::overload_name)
      .typed<copysign_Scalar_out::schema>();
}

// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copysign_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_copysign_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copysign_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_copysign_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor, name, "aten::logical_xor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor, schema_str, "logical_xor(Tensor self, Tensor other) -> Tensor")

// aten::logical_xor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logical_xor::schema> create_logical_xor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_xor::name, logical_xor::overload_name)
      .typed<logical_xor::schema>();
}

// aten::logical_xor(Tensor self, Tensor other) -> Tensor
at::Tensor logical_xor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_logical_xor_typed_handle();
    return op.call(self, other);
}

// aten::logical_xor(Tensor self, Tensor other) -> Tensor
at::Tensor logical_xor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_logical_xor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor_, name, "aten::logical_xor_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor_, schema_str, "logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_xor_::schema> create_logical_xor__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_xor_::name, logical_xor_::overload_name)
      .typed<logical_xor_::schema>();
}

// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_xor_::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_logical_xor__typed_handle();
    return op.call(self, other);
}

// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_xor_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_logical_xor__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor_out, name, "aten::logical_xor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_xor_out, schema_str, "logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_xor_out::schema> create_logical_xor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_xor_out::name, logical_xor_out::overload_name)
      .typed<logical_xor_out::schema>();
}

// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_xor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_logical_xor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_xor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_logical_xor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(broadcast_to, name, "aten::broadcast_to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(broadcast_to, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(broadcast_to, schema_str, "broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)")

// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<broadcast_to::schema> create_broadcast_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(broadcast_to::name, broadcast_to::overload_name)
      .typed<broadcast_to::schema>();
}

// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
at::Tensor broadcast_to::call(const at::Tensor & self, c10::SymIntArrayRef size) {

    static auto op = create_broadcast_to_typed_handle();
    return op.call(self, size);
}

// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
at::Tensor broadcast_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {

    static auto op = create_broadcast_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

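// NOTE [illustrative sketch, not generated code]: call() starts dispatch from
// the beginning, while redispatch() takes an explicit DispatchKeySet so a
// kernel that has already been selected (e.g. an autograd wrapper) can
// forward to the kernels below it without re-entering itself. A rough sketch
// of a wrapper kernel re-entering broadcast_to, assuming it sits in this same
// namespace and received `ks` from the dispatcher (the mask constant
// c10::after_autograd_keyset is the usual way to drop the autograd keys):
#if 0
at::Tensor broadcast_to_autograd_wrapper(c10::DispatchKeySet ks,
                                         const at::Tensor & self,
                                         c10::SymIntArrayRef size) {
  // ... autograd bookkeeping would go here ...
  // Continue dispatch below the autograd keys.
  return broadcast_to::redispatch(ks & c10::after_autograd_keyset, self, size);
}
#endif
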
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(constant_pad_nd, name, "aten::constant_pad_nd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(constant_pad_nd, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(constant_pad_nd, schema_str, "constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor")

// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<constant_pad_nd::schema> create_constant_pad_nd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(constant_pad_nd::name, constant_pad_nd::overload_name)
      .typed<constant_pad_nd::schema>();
}

// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
at::Tensor constant_pad_nd::call(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {

    static auto op = create_constant_pad_nd_typed_handle();
    return op.call(self, pad, value);
}

// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
at::Tensor constant_pad_nd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {

    static auto op = create_constant_pad_nd_typed_handle();
    return op.redispatch(dispatchKeySet, self, pad, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(contiguous, name, "aten::contiguous")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(contiguous, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(contiguous, schema_str, "contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)")

// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<contiguous::schema> create_contiguous_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(contiguous::name, contiguous::overload_name)
      .typed<contiguous::schema>();
}

// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
at::Tensor contiguous::call(const at::Tensor & self, at::MemoryFormat memory_format) {

    static auto op = create_contiguous_typed_handle();
    return op.call(self, memory_format);
}

// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
at::Tensor contiguous::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::MemoryFormat memory_format) {

    static auto op = create_contiguous_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward, name, "aten::convolution_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward, schema_str, "convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)")

// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<convolution_backward::schema> create_convolution_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(convolution_backward::name, convolution_backward::overload_name)
      .typed<convolution_backward::schema>();
}

// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {

    static auto op = create_convolution_backward_typed_handle();
    return op.call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {

    static auto op = create_convolution_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

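// NOTE [illustrative sketch, not generated code]: convolution_backward
// returns its three gradients as a std::tuple, and the bool[3] output_mask
// tells the kernel which of grad_input / grad_weight / grad_bias actually
// need to be computed. A hypothetical caller that only needs the weight
// gradient (all argument variables here are assumed to be in scope) might do:
#if 0
auto grads = convolution_backward::call(
    grad_output, input, weight, bias_sizes, stride, padding, dilation,
    transposed, output_padding, groups,
    /*output_mask=*/{false, true, false});  // skip grad_input and grad_bias
at::Tensor grad_weight = std::get<1>(grads);
#endif
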
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_overrideable, name, "aten::convolution_overrideable")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_overrideable, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_overrideable, schema_str, "convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor")

// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<convolution_overrideable::schema> create_convolution_overrideable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(convolution_overrideable::name, convolution_overrideable::overload_name)
      .typed<convolution_overrideable::schema>();
}

// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
at::Tensor convolution_overrideable::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {

    static auto op = create_convolution_overrideable_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}

// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
at::Tensor convolution_overrideable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {

    static auto op = create_convolution_overrideable_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_double_backward, name, "aten::_convolution_double_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_double_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_double_backward, schema_str, "_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)")

// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_convolution_double_backward::schema> create__convolution_double_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convolution_double_backward::name, _convolution_double_backward::overload_name)
      .typed<_convolution_double_backward::schema>();
}

// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward::call(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {

    static auto op = create__convolution_double_backward_typed_handle();
    return op.call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {

    static auto op = create__convolution_double_backward_typed_handle();
    return op.redispatch(dispatchKeySet, ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv2d, name, "aten::conv2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv2d, schema_str, "conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor")

// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv2d::schema> create_conv2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv2d::name, conv2d::overload_name)
      .typed<conv2d::schema>();
}

// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor
at::Tensor conv2d::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv2d_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor
at::Tensor conv2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv2d_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

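// NOTE [illustrative sketch, not generated code]: conv2d has a second
// overload (next block) whose padding parameter is a string ("valid"/"same")
// instead of int[2]; the dispatcher again distinguishes the two by
// overload_name ("padding"). At the user-facing at:: level, roughly
// (input/weight are assumed tensors of suitable shape):
#if 0
at::Tensor a = at::conv2d(input, weight, /*bias=*/{}, /*stride=*/{1, 1},
                          /*padding=*/{0, 0});     // int[2] overload above
at::Tensor b = at::conv2d(input, weight, /*bias=*/{}, /*stride=*/{1, 1},
                          /*padding=*/"same");     // string overload below
#endif
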
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv2d_padding, name, "aten::conv2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv2d_padding, overload_name, "padding")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv2d_padding, schema_str, "conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding=\"valid\", int[2] dilation=1, int groups=1) -> Tensor")

// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv2d_padding::schema> create_conv2d_padding_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv2d_padding::name, conv2d_padding::overload_name)
      .typed<conv2d_padding::schema>();
}

// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor
at::Tensor conv2d_padding::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv2d_padding_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor
at::Tensor conv2d_padding::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv2d_padding_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from, name, "aten::_copy_from")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from, schema_str, "_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor")

// aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_copy_from::schema> create__copy_from_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_copy_from::name, _copy_from::overload_name)
      .typed<_copy_from::schema>();
}

// aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
at::Tensor _copy_from::call(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {

    static auto op = create__copy_from_typed_handle();
    return op.call(self, dst, non_blocking);
}

// aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
at::Tensor _copy_from::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {

    static auto op = create__copy_from_typed_handle();
    return op.redispatch(dispatchKeySet, self, dst, non_blocking);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(corrcoef, name, "aten::corrcoef")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(corrcoef, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(corrcoef, schema_str, "corrcoef(Tensor self) -> Tensor")

// aten::corrcoef(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<corrcoef::schema> create_corrcoef_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(corrcoef::name, corrcoef::overload_name)
      .typed<corrcoef::schema>();
}

// aten::corrcoef(Tensor self) -> Tensor
at::Tensor corrcoef::call(const at::Tensor & self) {

    static auto op = create_corrcoef_typed_handle();
    return op.call(self);
}

// aten::corrcoef(Tensor self) -> Tensor
at::Tensor corrcoef::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_corrcoef_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm, name, "aten::cudnn_batch_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm, schema_str, "cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)")

// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_batch_norm::schema> create_cudnn_batch_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_batch_norm::name, cudnn_batch_norm::overload_name)
      .typed<cudnn_batch_norm::schema>();
}

// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {

    static auto op = create_cudnn_batch_norm_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {

    static auto op = create_cudnn_batch_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_transpose, name, "aten::_mps_convolution_transpose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_transpose, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_transpose, schema_str, "_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor")

// aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_mps_convolution_transpose::schema> create__mps_convolution_transpose_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mps_convolution_transpose::name, _mps_convolution_transpose::overload_name)
      .typed<_mps_convolution_transpose::schema>();
}

// aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor
at::Tensor _mps_convolution_transpose::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create__mps_convolution_transpose_typed_handle();
    return op.call(self, weight, padding, output_padding, stride, dilation, groups);
}

// aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor
at::Tensor _mps_convolution_transpose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create__mps_convolution_transpose_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mps_convolution_transpose_backward, name, "aten::mps_convolution_transpose_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mps_convolution_transpose_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mps_convolution_transpose_backward, schema_str, "mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)")

// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<mps_convolution_transpose_backward::schema> create_mps_convolution_transpose_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mps_convolution_transpose_backward::name, mps_convolution_transpose_backward::overload_name)
      .typed<mps_convolution_transpose_backward::schema>();
}

// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {

    static auto op = create_mps_convolution_transpose_backward_typed_handle();
    return op.call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
}

// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {

    static auto op = create_mps_convolution_transpose_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummaxmin_backward, name, "aten::cummaxmin_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummaxmin_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummaxmin_backward, schema_str, "cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor")

// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cummaxmin_backward::schema> create_cummaxmin_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummaxmin_backward::name, cummaxmin_backward::overload_name)
      .typed<cummaxmin_backward::schema>();
}

// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
at::Tensor cummaxmin_backward::call(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {

    static auto op = create_cummaxmin_backward_typed_handle();
    return op.call(grad, input, indices, dim);
}

// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
at::Tensor cummaxmin_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {

    static auto op = create_cummaxmin_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input, indices, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_backward, name, "aten::cumprod_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_backward, schema_str, "cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor")

// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_backward::schema> create_cumprod_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumprod_backward::name, cumprod_backward::overload_name)
      .typed<cumprod_backward::schema>();
}

// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
at::Tensor cumprod_backward::call(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {

    static auto op = create_cumprod_backward_typed_handle();
    return op.call(grad, input, dim, output);
}

// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
at::Tensor cumprod_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {

    static auto op = create_cumprod_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input, dim, output);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_diagonal_, name, "aten::fill_diagonal_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_diagonal_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_diagonal_, schema_str, "fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)")

// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fill_diagonal_::schema> create_fill_diagonal__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill_diagonal_::name, fill_diagonal_::overload_name)
      .typed<fill_diagonal_::schema>();
}

// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
at::Tensor & fill_diagonal_::call(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {

    static auto op = create_fill_diagonal__typed_handle();
    return op.call(self, fill_value, wrap);
}

// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
at::Tensor & fill_diagonal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & fill_value, bool wrap) {

    static auto op = create_fill_diagonal__typed_handle();
    return op.redispatch(dispatchKeySet, self, fill_value, wrap);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding, name, "aten::embedding")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding, schema_str, "embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor")

// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<embedding::schema> create_embedding_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding::name, embedding::overload_name)
      .typed<embedding::schema>();
}

// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
at::Tensor embedding::call(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {

    static auto op = create_embedding_typed_handle();
    return op.call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
}

// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
at::Tensor embedding::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {

    static auto op = create_embedding_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_rowwise_prune, name, "aten::_rowwise_prune")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_rowwise_prune, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_rowwise_prune, schema_str, "_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)")

// aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_rowwise_prune::schema> create__rowwise_prune_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_rowwise_prune::name, _rowwise_prune::overload_name)
      .typed<_rowwise_prune::schema>();
}

// aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune::call(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {

    static auto op = create__rowwise_prune_typed_handle();
    return op.call(weight, mask, compressed_indices_dtype);
}

// aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {

    static auto op = create__rowwise_prune_typed_handle();
    return op.redispatch(dispatchKeySet, weight, mask, compressed_indices_dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_stack, name, "aten::row_stack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_stack, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_stack, schema_str, "row_stack(Tensor[] tensors) -> Tensor")

// aten::row_stack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<row_stack::schema> create_row_stack_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_stack::name, row_stack::overload_name)
      .typed<row_stack::schema>();
}

// aten::row_stack(Tensor[] tensors) -> Tensor
at::Tensor row_stack::call(at::TensorList tensors) {

    static auto op = create_row_stack_typed_handle();
    return op.call(tensors);
}

// aten::row_stack(Tensor[] tensors) -> Tensor
at::Tensor row_stack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {

    static auto op = create_row_stack_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_stack_out, name, "aten::row_stack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_stack_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_stack_out, schema_str, "row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)")

// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<row_stack_out::schema> create_row_stack_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_stack_out::name, row_stack_out::overload_name)
      .typed<row_stack_out::schema>();
}

// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & row_stack_out::call(at::TensorList tensors, at::Tensor & out) {

    static auto op = create_row_stack_out_typed_handle();
    return op.call(tensors, out);
}

// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & row_stack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {

    static auto op = create_row_stack_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_backward, name, "aten::_embedding_bag_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_backward, schema_str, "_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor")

// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_backward::schema> create__embedding_bag_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_backward::name, _embedding_bag_backward::overload_name)
      .typed<_embedding_bag_backward::schema>();
}

// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_backward::call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {

    static auto op = create__embedding_bag_backward_typed_handle();
    return op.call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {

    static auto op = create__embedding_bag_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_dense_backward, name, "aten::_embedding_bag_dense_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_dense_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_dense_backward, schema_str, "_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor")

// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_dense_backward::schema> create__embedding_bag_dense_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_dense_backward::name, _embedding_bag_dense_backward::overload_name)
      .typed<_embedding_bag_dense_backward::schema>();
}

// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_dense_backward::call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {

    static auto op = create__embedding_bag_dense_backward_typed_handle();
    return op.call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_dense_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {

    static auto op = create__embedding_bag_dense_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_, name, "aten::resize_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_, schema_str, "resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)")

// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<resize_::schema> create_resize__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_::name, resize_::overload_name)
      .typed<resize_::schema>();
}

// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
const at::Tensor & resize_::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_resize__typed_handle();
    return op.call(self, size, memory_format);
}

// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
const at::Tensor & resize_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_resize__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, memory_format);
}

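// NOTE [illustrative sketch, not generated code]: unlike most in-place ops
// above (compare asin_, which takes at::Tensor &), resize_ is declared with
// `const at::Tensor &` parameters and return. at::Tensor is a shared handle,
// so C++ const-ness does not make the underlying data immutable; mutability
// is tracked by the (a!) annotation in the schema instead. Usage is
// unaffected:
#if 0
at::Tensor t = at::rand({2, 2});
t.resize_({4, 4});  // routes through resize_::call above
#endif
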
2145STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc, name, "aten::erfc")
2146STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc, overload_name, "")
2147STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc, schema_str, "erfc(Tensor self) -> Tensor")
2148
2149// aten::erfc(Tensor self) -> Tensor
2150static C10_NOINLINE c10::TypedOperatorHandle<erfc::schema> create_erfc_typed_handle() {
2151 return c10::Dispatcher::singleton()
2152 .findSchemaOrThrow(erfc::name, erfc::overload_name)
2153 .typed<erfc::schema>();
2154}
2155
2156// aten::erfc(Tensor self) -> Tensor
2157at::Tensor erfc::call(const at::Tensor & self) {
2158
2159 static auto op = create_erfc_typed_handle();
2160 return op.call(self);
2161}
2162
2163// aten::erfc(Tensor self) -> Tensor
2164at::Tensor erfc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
2165
2166 static auto op = create_erfc_typed_handle();
2167 return op.redispatch(dispatchKeySet, self);
2168}
2169
2170STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc_, name, "aten::erfc_")
2171STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc_, overload_name, "")
2172STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc_, schema_str, "erfc_(Tensor(a!) self) -> Tensor(a!)")
2173
2174// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
2175static C10_NOINLINE c10::TypedOperatorHandle<erfc_::schema> create_erfc__typed_handle() {
2176 return c10::Dispatcher::singleton()
2177 .findSchemaOrThrow(erfc_::name, erfc_::overload_name)
2178 .typed<erfc_::schema>();
2179}
2180
2181// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
2182at::Tensor & erfc_::call(at::Tensor & self) {
2183
2184 static auto op = create_erfc__typed_handle();
2185 return op.call(self);
2186}
2187
2188// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
2189at::Tensor & erfc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
2190
2191 static auto op = create_erfc__typed_handle();
2192 return op.redispatch(dispatchKeySet, self);
2193}
2194
2195STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc_out, name, "aten::erfc")
2196STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc_out, overload_name, "out")
2197STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfc_out, schema_str, "erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
2198
2199// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2200static C10_NOINLINE c10::TypedOperatorHandle<erfc_out::schema> create_erfc_out_typed_handle() {
2201 return c10::Dispatcher::singleton()
2202 .findSchemaOrThrow(erfc_out::name, erfc_out::overload_name)
2203 .typed<erfc_out::schema>();
2204}
2205
2206// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2207at::Tensor & erfc_out::call(const at::Tensor & self, at::Tensor & out) {
2208
2209 static auto op = create_erfc_out_typed_handle();
2210 return op.call(self, out);
2211}
2212
2213// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2214at::Tensor & erfc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
2215
2216 static auto op = create_erfc_out_typed_handle();
2217 return op.redispatch(dispatchKeySet, self, out);
2218}
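
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). The three erfc entry points above map onto
// the usual functional / in-place / out= triple:
//
//   at::Tensor x = at::rand({4});
//   at::Tensor y = at::erfc(x);            // functional   -> erfc::call
//   x.erfc_();                             // in-place     -> erfc_::call
//   at::Tensor out = at::empty_like(x);
//   at::erfc_out(out, x);                  // out= variant -> erfc_out::call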

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide, name, "aten::floor_divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide, schema_str, "floor_divide(Tensor self, Tensor other) -> Tensor")

// aten::floor_divide(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide::schema> create_floor_divide_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_divide::name, floor_divide::overload_name)
      .typed<floor_divide::schema>();
}

// aten::floor_divide(Tensor self, Tensor other) -> Tensor
at::Tensor floor_divide::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_floor_divide_typed_handle();
    return op.call(self, other);
}

// aten::floor_divide(Tensor self, Tensor other) -> Tensor
at::Tensor floor_divide::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_floor_divide_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide__Tensor, name, "aten::floor_divide_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide__Tensor, schema_str, "floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide__Tensor::schema> create_floor_divide__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_divide__Tensor::name, floor_divide__Tensor::overload_name)
      .typed<floor_divide__Tensor::schema>();
}

// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & floor_divide__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_floor_divide__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & floor_divide__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_floor_divide__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide_out, name, "aten::floor_divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide_out, schema_str, "floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide_out::schema> create_floor_divide_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_divide_out::name, floor_divide_out::overload_name)
      .typed<floor_divide_out::schema>();
}

// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_divide_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_floor_divide_out_typed_handle();
    return op.call(self, other, out);
}

// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_divide_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_floor_divide_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide_Scalar, name, "aten::floor_divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide_Scalar, schema_str, "floor_divide.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide_Scalar::schema> create_floor_divide_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_divide_Scalar::name, floor_divide_Scalar::overload_name)
      .typed<floor_divide_Scalar::schema>();
}

// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor floor_divide_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_floor_divide_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor floor_divide_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_floor_divide_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide__Scalar, name, "aten::floor_divide_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_divide__Scalar, schema_str, "floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide__Scalar::schema> create_floor_divide__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_divide__Scalar::name, floor_divide__Scalar::overload_name)
      .typed<floor_divide__Scalar::schema>();
}

// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & floor_divide__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_floor_divide__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & floor_divide__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_floor_divide__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}
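
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). The five floor_divide entry points above
// cover the Tensor/Scalar functional, in-place, and out= overloads:
//
//   at::Tensor a = at::arange(6, at::kFloat);
//   at::Tensor b = at::full_like(a, 4.0);
//   at::Tensor q  = at::floor_divide(a, b);      // Tensor overload
//   at::Tensor q2 = at::floor_divide(a, 4.0);    // Scalar overload
//   a.floor_divide_(b);                          // in-place, Tensor overload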

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_names, name, "aten::full")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_names, overload_name, "names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_names, schema_str, "full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<full_names::schema> create_full_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(full_names::name, full_names::overload_name)
      .typed<full_names::schema>();
}

// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor full_names::call(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_full_names_typed_handle();
    return op.call(size, fill_value, names, dtype, layout, device, pin_memory);
}

// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor full_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_full_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, fill_value, names, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full, name, "aten::full")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full, schema_str, "full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<full::schema> create_full_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(full::name, full::overload_name)
      .typed<full::schema>();
}

// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor full::call(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_full_typed_handle();
    return op.call(size, fill_value, dtype, layout, device, pin_memory);
}

// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor full::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_full_typed_handle();
    return op.redispatch(dispatchKeySet, size, fill_value, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_out, name, "aten::full")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_out, schema_str, "full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)")

// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<full_out::schema> create_full_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(full_out::name, full_out::overload_name)
      .typed<full_out::schema>();
}

// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_out::call(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {

    static auto op = create_full_out_typed_handle();
    return op.call(size, fill_value, out);
}

// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {

    static auto op = create_full_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, fill_value, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_like, name, "aten::full_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_like, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_like, schema_str, "full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")

// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<full_like::schema> create_full_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(full_like::name, full_like::overload_name)
      .typed<full_like::schema>();
}

// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor full_like::call(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_full_like_typed_handle();
    return op.call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
}

// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor full_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_full_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, fill_value, dtype, layout, device, pin_memory, memory_format);
}
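
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). The factory overloads above are reached as:
//
//   at::Tensor f = at::full({2, 3}, 1.5);             // -> full::call
//   at::Tensor g = at::full({2, 3}, 7, at::kLong);    // dtype via TensorOptions
//   at::Tensor h = at::full_like(f, 0.0);             // -> full_like::call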

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d, name, "aten::grid_sampler_2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d, schema_str, "grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor")

// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_2d::schema> create_grid_sampler_2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_2d::name, grid_sampler_2d::overload_name)
      .typed<grid_sampler_2d::schema>();
}

// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler_2d::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create_grid_sampler_2d_typed_handle();
    return op.call(input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler_2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create_grid_sampler_2d_typed_handle();
    return op.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
}
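
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). The integer mode arguments encode the
// interpolation (0 = bilinear) and padding (0 = zeros) enums:
//
//   at::Tensor input = at::rand({1, 3, 8, 8});        // [N, C, H_in, W_in]
//   at::Tensor grid  = at::rand({1, 4, 4, 2}) * 2 - 1; // [N, H_out, W_out, 2]
//   at::Tensor out = at::grid_sampler_2d(
//       input, grid, /*interpolation_mode=*/0, /*padding_mode=*/0,
//       /*align_corners=*/false);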

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback_backward, name, "aten::_grid_sampler_2d_cpu_fallback_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback_backward, schema_str, "_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)")

// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_grid_sampler_2d_cpu_fallback_backward::schema> create__grid_sampler_2d_cpu_fallback_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_grid_sampler_2d_cpu_fallback_backward::name, _grid_sampler_2d_cpu_fallback_backward::overload_name)
      .typed<_grid_sampler_2d_cpu_fallback_backward::schema>();
}

// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create__grid_sampler_2d_cpu_fallback_backward_typed_handle();
    return op.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create__grid_sampler_2d_cpu_fallback_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window, name, "aten::kaiser_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window, schema_str, "kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window::schema> create_kaiser_window_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window::name, kaiser_window::overload_name)
      .typed<kaiser_window::schema>();
}

// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window::call(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_kaiser_window_typed_handle();
    return op.call(window_length, dtype, layout, device, pin_memory);
}

// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_kaiser_window_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_periodic, name, "aten::kaiser_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_periodic, overload_name, "periodic")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_periodic, schema_str, "kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_periodic::schema> create_kaiser_window_periodic_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window_periodic::name, kaiser_window_periodic::overload_name)
      .typed<kaiser_window_periodic::schema>();
}

// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window_periodic::call(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_kaiser_window_periodic_typed_handle();
    return op.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_kaiser_window_periodic_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_beta, name, "aten::kaiser_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_beta, overload_name, "beta")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_beta, schema_str, "kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_beta::schema> create_kaiser_window_beta_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window_beta::name, kaiser_window_beta::overload_name)
      .typed<kaiser_window_beta::schema>();
}

// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window_beta::call(int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_kaiser_window_beta_typed_handle();
    return op.call(window_length, periodic, beta, dtype, layout, device, pin_memory);
}

// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window_beta::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_kaiser_window_beta_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, beta, dtype, layout, device, pin_memory);
}
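
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). The three overloads above progressively add
// the periodic flag and the beta shape parameter:
//
//   at::Tensor w  = at::kaiser_window(128);                    // default beta
//   at::Tensor wp = at::kaiser_window(128, /*periodic=*/true);
//   at::Tensor wb = at::kaiser_window(128, /*periodic=*/true, /*beta=*/8.0,
//                                     at::kFloat);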

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2r, name, "aten::_fft_c2r")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2r, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2r, schema_str, "_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor")

// aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_fft_c2r::schema> create__fft_c2r_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fft_c2r::name, _fft_c2r::overload_name)
      .typed<_fft_c2r::schema>();
}

// aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor
at::Tensor _fft_c2r::call(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {

    static auto op = create__fft_c2r_typed_handle();
    return op.call(self, dim, normalization, last_dim_size);
}

// aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor
at::Tensor _fft_c2r::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {

    static auto op = create__fft_c2r_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, normalization, last_dim_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2r_out, name, "aten::_fft_c2r")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2r_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2r_out, schema_str, "_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_fft_c2r_out::schema> create__fft_c2r_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fft_c2r_out::name, _fft_c2r_out::overload_name)
      .typed<_fft_c2r_out::schema>();
}

// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fft_c2r_out::call(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) {

    static auto op = create__fft_c2r_out_typed_handle();
    return op.call(self, dim, normalization, last_dim_size, out);
}

// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fft_c2r_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) {

    static auto op = create__fft_c2r_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, normalization, last_dim_size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_set_plan_cache_max_size, name, "aten::_cufft_set_plan_cache_max_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_set_plan_cache_max_size, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_set_plan_cache_max_size, schema_str, "_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()")

// aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_cufft_set_plan_cache_max_size::schema> create__cufft_set_plan_cache_max_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cufft_set_plan_cache_max_size::name, _cufft_set_plan_cache_max_size::overload_name)
      .typed<_cufft_set_plan_cache_max_size::schema>();
}

// aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
void _cufft_set_plan_cache_max_size::call(int64_t device_index, int64_t max_size) {

    static auto op = create__cufft_set_plan_cache_max_size_typed_handle();
    return op.call(device_index, max_size);
}

// aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
void _cufft_set_plan_cache_max_size::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t device_index, int64_t max_size) {

    static auto op = create__cufft_set_plan_cache_max_size_typed_handle();
    return op.redispatch(dispatchKeySet, device_index, max_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put_, name, "aten::index_put_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put_, schema_str, "index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)")

// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_put_::schema> create_index_put__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_put_::name, index_put_::overload_name)
      .typed<index_put_::schema>();
}

// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
at::Tensor & index_put_::call(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {

    static auto op = create_index_put__typed_handle();
    return op.call(self, indices, values, accumulate);
}

// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
at::Tensor & index_put_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {

    static auto op = create_index_put__typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put, name, "aten::index_put")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put, schema_str, "index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor")

// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_put::schema> create_index_put_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_put::name, index_put::overload_name)
      .typed<index_put::schema>();
}

// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
at::Tensor index_put::call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {

    static auto op = create_index_put_typed_handle();
    return op.call(self, indices, values, accumulate);
}

// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
at::Tensor index_put::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {

    static auto op = create_index_put_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate);
}
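
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). The Tensor?[] indices argument is modeled as
// c10::List<c10::optional<at::Tensor>>; a nullopt entry means ":" for that
// dimension:
//
//   at::Tensor t = at::zeros({4});
//   c10::List<c10::optional<at::Tensor>> indices;
//   indices.push_back(at::tensor({0, 2}));        // index dim 0 at {0, 2}
//   at::Tensor v = at::ones({2});
//   t.index_put_(indices, v, /*accumulate=*/false);  // -> index_put_::call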

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(instance_norm, name, "aten::instance_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(instance_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(instance_norm, schema_str, "instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor")

// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<instance_norm::schema> create_instance_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(instance_norm::name, instance_norm::overload_name)
      .typed<instance_norm::schema>();
}

// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
at::Tensor instance_norm::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {

    static auto op = create_instance_norm_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
}

// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
at::Tensor instance_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {

    static auto op = create_instance_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isclose, name, "aten::isclose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isclose, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isclose, schema_str, "isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor")

// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isclose::schema> create_isclose_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isclose::name, isclose::overload_name)
      .typed<isclose::schema>();
}

// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
at::Tensor isclose::call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {

    static auto op = create_isclose_typed_handle();
    return op.call(self, other, rtol, atol, equal_nan);
}

// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
at::Tensor isclose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {

    static auto op = create_isclose_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rtol, atol, equal_nan);
}
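
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). isclose performs the elementwise test
// |self - other| <= atol + rtol * |other|:
//
//   at::Tensor a = at::rand({3});
//   at::Tensor b = a + 1e-7;
//   at::Tensor close  = at::isclose(a, b);                        // defaults
//   at::Tensor strict = at::isclose(a, b, /*rtol=*/0.0, /*atol=*/1e-9);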

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_floating_point, name, "aten::is_floating_point")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_floating_point, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_floating_point, schema_str, "is_floating_point(Tensor self) -> bool")

// aten::is_floating_point(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_floating_point::schema> create_is_floating_point_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_floating_point::name, is_floating_point::overload_name)
      .typed<is_floating_point::schema>();
}

// aten::is_floating_point(Tensor self) -> bool
bool is_floating_point::call(const at::Tensor & self) {

    static auto op = create_is_floating_point_typed_handle();
    return op.call(self);
}

// aten::is_floating_point(Tensor self) -> bool
bool is_floating_point::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_is_floating_point_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_complex, name, "aten::is_complex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_complex, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_complex, schema_str, "is_complex(Tensor self) -> bool")

// aten::is_complex(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_complex::schema> create_is_complex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_complex::name, is_complex::overload_name)
      .typed<is_complex::schema>();
}

// aten::is_complex(Tensor self) -> bool
bool is_complex::call(const at::Tensor & self) {

    static auto op = create_is_complex_typed_handle();
    return op.call(self);
}

// aten::is_complex(Tensor self) -> bool
bool is_complex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_is_complex_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_same_size, name, "aten::is_same_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_same_size, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_same_size, schema_str, "is_same_size(Tensor self, Tensor other) -> bool")

// aten::is_same_size(Tensor self, Tensor other) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_same_size::schema> create_is_same_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_same_size::name, is_same_size::overload_name)
      .typed<is_same_size::schema>();
}

// aten::is_same_size(Tensor self, Tensor other) -> bool
bool is_same_size::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_is_same_size_typed_handle();
    return op.call(self, other);
}

// aten::is_same_size(Tensor self, Tensor other) -> bool
bool is_same_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_is_same_size_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}
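
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). These predicates dispatch like any other
// operator but return plain bool rather than Tensor:
//
//   at::Tensor x = at::rand({2, 2});
//   bool fp   = at::is_floating_point(x);               // true for kFloat
//   bool cplx = at::is_complex(x);                      // false
//   bool same = at::is_same_size(x, at::rand({2, 2}));  // true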

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kl_div, name, "aten::kl_div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kl_div, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kl_div, schema_str, "kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor")

// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<kl_div::schema> create_kl_div_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kl_div::name, kl_div::overload_name)
      .typed<kl_div::schema>();
}

// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
at::Tensor kl_div::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {

    static auto op = create_kl_div_typed_handle();
    return op.call(self, target, reduction, log_target);
}

// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
at::Tensor kl_div::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {

    static auto op = create_kl_div_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, log_target);
}
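
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). The int reduction argument takes the
// at::Reduction enum values; self is expected in log space:
//
//   at::Tensor input  = at::log_softmax(at::rand({3, 5}), /*dim=*/1);
//   at::Tensor target = at::softmax(at::rand({3, 5}), /*dim=*/1);
//   at::Tensor loss = at::kl_div(input, target, at::Reduction::Mean,
//                                /*log_target=*/false);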

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_pack_gemm_matrix_fp16, name, "aten::fbgemm_pack_gemm_matrix_fp16")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_pack_gemm_matrix_fp16, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_pack_gemm_matrix_fp16, schema_str, "fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor")

// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_pack_gemm_matrix_fp16::schema> create_fbgemm_pack_gemm_matrix_fp16_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fbgemm_pack_gemm_matrix_fp16::name, fbgemm_pack_gemm_matrix_fp16::overload_name)
      .typed<fbgemm_pack_gemm_matrix_fp16::schema>();
}

// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
at::Tensor fbgemm_pack_gemm_matrix_fp16::call(const at::Tensor & input) {

    static auto op = create_fbgemm_pack_gemm_matrix_fp16_typed_handle();
    return op.call(input);
}

// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
at::Tensor fbgemm_pack_gemm_matrix_fp16::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) {

    static auto op = create_fbgemm_pack_gemm_matrix_fp16_typed_handle();
    return op.redispatch(dispatchKeySet, input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(margin_ranking_loss, name, "aten::margin_ranking_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(margin_ranking_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(margin_ranking_loss, schema_str, "margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor")

// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<margin_ranking_loss::schema> create_margin_ranking_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(margin_ranking_loss::name, margin_ranking_loss::overload_name)
      .typed<margin_ranking_loss::schema>();
}

// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
at::Tensor margin_ranking_loss::call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {

    static auto op = create_margin_ranking_loss_typed_handle();
    return op.call(input1, input2, target, margin, reduction);
}

// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
at::Tensor margin_ranking_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {

    static auto op = create_margin_ranking_loss_typed_handle();
    return op.redispatch(dispatchKeySet, input1, input2, target, margin, reduction);
}
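
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). target holds +1 where input1 should rank
// above input2, and -1 for the opposite ordering:
//
//   at::Tensor x1 = at::rand({8});
//   at::Tensor x2 = at::rand({8});
//   at::Tensor y  = at::ones({8});
//   at::Tensor loss = at::margin_ranking_loss(x1, x2, y, /*margin=*/0.5,
//                                             at::Reduction::Mean);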

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul, name, "aten::matmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul, schema_str, "matmul(Tensor self, Tensor other) -> Tensor")

// aten::matmul(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<matmul::schema> create_matmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matmul::name, matmul::overload_name)
      .typed<matmul::schema>();
}

// aten::matmul(Tensor self, Tensor other) -> Tensor
at::Tensor matmul::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_matmul_typed_handle();
    return op.call(self, other);
}

// aten::matmul(Tensor self, Tensor other) -> Tensor
at::Tensor matmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_matmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_backward, name, "aten::matmul_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_backward, schema_str, "matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)")

// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<matmul_backward::schema> create_matmul_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matmul_backward::name, matmul_backward::overload_name)
      .typed<matmul_backward::schema>();
}

// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> matmul_backward::call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {

    static auto op = create_matmul_backward_typed_handle();
    return op.call(grad, self, other, mask);
}

// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> matmul_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {

    static auto op = create_matmul_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, other, mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_out, name, "aten::matmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_out, schema_str, "matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<matmul_out::schema> create_matmul_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matmul_out::name, matmul_out::overload_name)
      .typed<matmul_out::schema>();
}

// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & matmul_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_matmul_out_typed_handle();
    return op.call(self, other, out);
}

// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & matmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_matmul_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}
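
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>):
//
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({3, 4});
//   at::Tensor c = at::matmul(a, b);       // -> matmul::call
//   at::Tensor out = at::empty({2, 4});
//   at::matmul_out(out, a, b);             // -> matmul_out::call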

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_exp, name, "aten::matrix_exp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_exp, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_exp, schema_str, "matrix_exp(Tensor self) -> Tensor")

// aten::matrix_exp(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<matrix_exp::schema> create_matrix_exp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matrix_exp::name, matrix_exp::overload_name)
      .typed<matrix_exp::schema>();
}

// aten::matrix_exp(Tensor self) -> Tensor
at::Tensor matrix_exp::call(const at::Tensor & self) {

    static auto op = create_matrix_exp_typed_handle();
    return op.call(self);
}

// aten::matrix_exp(Tensor self) -> Tensor
at::Tensor matrix_exp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_matrix_exp_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
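
// Illustrative usage sketch (not produced by torchgen; assumes the public C++
// frontend from <ATen/ATen.h>). matrix_exp computes the matrix exponential of
// square matrices (batched over leading dimensions):
//
//   at::Tensor m  = at::eye(3);
//   at::Tensor em = at::matrix_exp(m);   // exp(I) == e * I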

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_compute_linear_combination, name, "aten::_compute_linear_combination")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_compute_linear_combination, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_compute_linear_combination, schema_str, "_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor")

// aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_compute_linear_combination::schema> create__compute_linear_combination_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_compute_linear_combination::name, _compute_linear_combination::overload_name)
      .typed<_compute_linear_combination::schema>();
}

// aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
at::Tensor _compute_linear_combination::call(const at::Tensor & input, const at::Tensor & coefficients) {

    static auto op = create__compute_linear_combination_typed_handle();
    return op.call(input, coefficients);
}

// aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
at::Tensor _compute_linear_combination::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients) {

    static auto op = create__compute_linear_combination_typed_handle();
    return op.redispatch(dispatchKeySet, input, coefficients);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_compute_linear_combination_out, name, "aten::_compute_linear_combination")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_compute_linear_combination_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_compute_linear_combination_out, schema_str, "_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_compute_linear_combination_out::schema> create__compute_linear_combination_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_compute_linear_combination_out::name, _compute_linear_combination_out::overload_name)
      .typed<_compute_linear_combination_out::schema>();
}

// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _compute_linear_combination_out::call(const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {

    static auto op = create__compute_linear_combination_out_typed_handle();
    return op.call(input, coefficients, out);
}

// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _compute_linear_combination_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {

    static auto op = create__compute_linear_combination_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, coefficients, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_backward, name, "aten::mkldnn_max_pool2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_backward, schema_str, "mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool2d_backward::schema> create_mkldnn_max_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool2d_backward::name, mkldnn_max_pool2d_backward::overload_name)
      .typed<mkldnn_max_pool2d_backward::schema>();
}

// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_mkldnn_max_pool2d_backward_typed_handle();
    return op.call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_mkldnn_max_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d, name, "aten::max_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d, schema_str, "max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d::schema> create_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d::name, max_pool3d::overload_name)
      .typed<max_pool3d::schema>();
}

// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool3d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}
3094
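// [Illustrative sketch, not generated code] Every wrapper in this file follows
// the same pattern as max_pool3d above: resolve the schema once, cache the
// TypedOperatorHandle in a function-local static, and forward through call().
// A hand-written caller can use the dispatcher the same way; the function name
// below is hypothetical and exists only for illustration.
namespace {
[[maybe_unused]] at::Tensor example_max_pool3d_via_dispatcher(const at::Tensor & self) {
  static auto op = c10::Dispatcher::singleton()
      .findSchemaOrThrow("aten::max_pool3d", "")
      .typed<at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool)>();
  // kernel_size {2,2,2}; an empty stride defaults to kernel_size per the schema.
  return op.call(self, {2, 2, 2}, {}, {0, 0, 0}, {1, 1, 1}, false);
}
} // namespace
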
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median, name, "aten::median")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median, schema_str, "median(Tensor self) -> Tensor")

// aten::median(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<median::schema> create_median_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median::name, median::overload_name)
      .typed<median::schema>();
}

// aten::median(Tensor self) -> Tensor
at::Tensor median::call(const at::Tensor & self) {

    static auto op = create_median_typed_handle();
    return op.call(self);
}

// aten::median(Tensor self) -> Tensor
at::Tensor median::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_median_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_dim, name, "aten::median")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_dim, schema_str, "median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<median_dim::schema> create_median_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_dim::name, median_dim::overload_name)
      .typed<median_dim::schema>();
}

// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> median_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_median_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> median_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_median_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_dim_values, name, "aten::median")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_dim_values, overload_name, "dim_values")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_dim_values, schema_str, "median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<median_dim_values::schema> create_median_dim_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_dim_values::name, median_dim_values::overload_name)
      .typed<median_dim_values::schema>();
}

// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> median_dim_values::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_median_dim_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> median_dim_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_median_dim_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_names_dim, name, "aten::median")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_names_dim, overload_name, "names_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_names_dim, schema_str, "median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<median_names_dim::schema> create_median_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_names_dim::name, median_names_dim::overload_name)
      .typed<median_names_dim::schema>();
}

// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> median_names_dim::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_median_names_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> median_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_median_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_names_dim_values, name, "aten::median")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_names_dim_values, overload_name, "names_dim_values")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_names_dim_values, schema_str, "median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<median_names_dim_values::schema> create_median_names_dim_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_names_dim_values::name, median_names_dim_values::overload_name)
      .typed<median_names_dim_values::schema>();
}

// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> median_names_dim_values::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_median_names_dim_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> median_names_dim_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_median_names_dim_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian, name, "aten::nanmedian")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian, schema_str, "nanmedian(Tensor self) -> Tensor")

// aten::nanmedian(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian::schema> create_nanmedian_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian::name, nanmedian::overload_name)
      .typed<nanmedian::schema>();
}

// aten::nanmedian(Tensor self) -> Tensor
at::Tensor nanmedian::call(const at::Tensor & self) {

    static auto op = create_nanmedian_typed_handle();
    return op.call(self);
}

// aten::nanmedian(Tensor self) -> Tensor
at::Tensor nanmedian::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_nanmedian_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_dim, name, "aten::nanmedian")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_dim, schema_str, "nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_dim::schema> create_nanmedian_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_dim::name, nanmedian_dim::overload_name)
      .typed<nanmedian_dim::schema>();
}

// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> nanmedian_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_nanmedian_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> nanmedian_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_nanmedian_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_dim_values, name, "aten::nanmedian")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_dim_values, overload_name, "dim_values")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_dim_values, schema_str, "nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_dim_values::schema> create_nanmedian_dim_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_dim_values::name, nanmedian_dim_values::overload_name)
      .typed<nanmedian_dim_values::schema>();
}

// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_dim_values::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_nanmedian_dim_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_dim_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_nanmedian_dim_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_names_dim, name, "aten::nanmedian")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_names_dim, overload_name, "names_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_names_dim, schema_str, "nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_names_dim::schema> create_nanmedian_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_names_dim::name, nanmedian_names_dim::overload_name)
      .typed<nanmedian_names_dim::schema>();
}

// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_nanmedian_names_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_nanmedian_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_names_dim_values, name, "aten::nanmedian")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_names_dim_values, overload_name, "names_dim_values")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_names_dim_values, schema_str, "nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_names_dim_values::schema> create_nanmedian_names_dim_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_names_dim_values::name, nanmedian_names_dim_values::overload_name)
      .typed<nanmedian_names_dim_values::schema>();
}

// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_names_dim_values::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_nanmedian_names_dim_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_names_dim_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_nanmedian_names_dim_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

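// [Illustrative sketch, not generated code] call() and redispatch() above
// differ only in where dispatch starts: call() computes the dispatch key set
// from its arguments, while redispatch() takes an explicit key set so that a
// kernel already running for some key can forward to the next one in line.
// A minimal hand-written kernel body, assuming c10::after_autograd_keyset and
// the median wrapper defined above (the function name is hypothetical):
namespace {
[[maybe_unused]] at::Tensor example_skip_autograd_median(c10::DispatchKeySet ks, const at::Tensor & self) {
  // Mask out the autograd keys and re-enter dispatch below them.
  return median::redispatch(ks & c10::after_autograd_keyset, self);
}
} // namespace
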
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm, name, "aten::miopen_batch_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm, schema_str, "miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)")

// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<miopen_batch_norm::schema> create_miopen_batch_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_batch_norm::name, miopen_batch_norm::overload_name)
      .typed<miopen_batch_norm::schema>();
}

// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {

    static auto op = create_miopen_batch_norm_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {

    static auto op = create_miopen_batch_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

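// [Illustrative sketch, not generated code] Schema types map onto fixed C++
// types in these wrappers: float -> double, int -> int64_t, Tensor? ->
// c10::optional<at::Tensor>, and bool[N] -> ::std::array<bool,N>. Passing "no
// bias" to miopen_batch_norm therefore means an empty optional, not an empty
// Tensor. The function name below is hypothetical:
namespace {
[[maybe_unused]] ::std::tuple<at::Tensor,at::Tensor,at::Tensor> example_bn_without_stats(
    const at::Tensor & input, const at::Tensor & weight) {
  return miopen_batch_norm::call(
      input, weight,
      /*bias=*/c10::nullopt, /*running_mean=*/c10::nullopt, /*running_var=*/c10::nullopt,
      /*training=*/true, /*exponential_average_factor=*/0.1, /*epsilon=*/1e-5);
}
} // namespace
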
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_transpose, name, "aten::miopen_convolution_transpose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_transpose, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_transpose, schema_str, "miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor")

// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_transpose::schema> create_miopen_convolution_transpose_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_convolution_transpose::name, miopen_convolution_transpose::overload_name)
      .typed<miopen_convolution_transpose::schema>();
}

// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_convolution_transpose::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {

    static auto op = create_miopen_convolution_transpose_typed_handle();
    return op.call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_convolution_transpose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {

    static auto op = create_miopen_convolution_transpose_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_add_relu, name, "aten::miopen_convolution_add_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_add_relu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_add_relu, schema_str, "miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor")

// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_add_relu::schema> create_miopen_convolution_add_relu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_convolution_add_relu::name, miopen_convolution_add_relu::overload_name)
      .typed<miopen_convolution_add_relu::schema>();
}

// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
at::Tensor miopen_convolution_add_relu::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_miopen_convolution_add_relu_typed_handle();
    return op.call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
at::Tensor miopen_convolution_add_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_miopen_convolution_add_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_rnn_backward, name, "aten::miopen_rnn_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_rnn_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_rnn_backward, schema_str, "miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])")

// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
static C10_NOINLINE c10::TypedOperatorHandle<miopen_rnn_backward::schema> create_miopen_rnn_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_rnn_backward::name, miopen_rnn_backward::overload_name)
      .typed<miopen_rnn_backward::schema>();
}

// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {

    static auto op = create_miopen_rnn_backward_typed_handle();
    return op.call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {

    static auto op = create_miopen_rnn_backward_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_Tensor, name, "aten::multiply")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_Tensor, schema_str, "multiply.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<multiply_Tensor::schema> create_multiply_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply_Tensor::name, multiply_Tensor::overload_name)
      .typed<multiply_Tensor::schema>();
}

// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor multiply_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_multiply_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor multiply_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_multiply_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply__Tensor, name, "aten::multiply_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply__Tensor, schema_str, "multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multiply__Tensor::schema> create_multiply__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply__Tensor::name, multiply__Tensor::overload_name)
      .typed<multiply__Tensor::schema>();
}

// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & multiply__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_multiply__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & multiply__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_multiply__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_out, name, "aten::multiply")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_out, schema_str, "multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multiply_out::schema> create_multiply_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply_out::name, multiply_out::overload_name)
      .typed<multiply_out::schema>();
}

// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multiply_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_multiply_out_typed_handle();
    return op.call(self, other, out);
}

// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multiply_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_multiply_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_Scalar, name, "aten::multiply")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply_Scalar, schema_str, "multiply.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<multiply_Scalar::schema> create_multiply_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply_Scalar::name, multiply_Scalar::overload_name)
      .typed<multiply_Scalar::schema>();
}

// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor multiply_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_multiply_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor multiply_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_multiply_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply__Scalar, name, "aten::multiply_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multiply__Scalar, schema_str, "multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multiply__Scalar::schema> create_multiply__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply__Scalar::name, multiply__Scalar::overload_name)
      .typed<multiply__Scalar::schema>();
}

// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & multiply__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_multiply__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & multiply__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_multiply__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

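// [Illustrative sketch, not generated code] The five multiply variants above
// share one base name; findSchemaOrThrow() distinguishes them purely by
// overload_name ("Tensor", "out", "Scalar", ...). Resolving two of them by
// hand (the function name is hypothetical):
namespace {
[[maybe_unused]] void example_multiply_overloads(const at::Tensor & a, const at::Tensor & b, at::Tensor & out) {
  static auto mul_functional = c10::Dispatcher::singleton()
      .findSchemaOrThrow("aten::multiply", "Tensor")
      .typed<at::Tensor (const at::Tensor &, const at::Tensor &)>();
  static auto mul_out = c10::Dispatcher::singleton()
      .findSchemaOrThrow("aten::multiply", "out")
      .typed<at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)>();
  at::Tensor fresh = mul_functional.call(a, b);  // functional: allocates a new result
  mul_out.call(a, b, out);                       // out variant: writes into `out`
  (void)fresh;
}
} // namespace
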
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_elemt, name, "aten::batch_norm_elemt")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_elemt, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_elemt, schema_str, "batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor")

// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_elemt::schema> create_batch_norm_elemt_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_elemt::name, batch_norm_elemt::overload_name)
      .typed<batch_norm_elemt::schema>();
}

// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
at::Tensor batch_norm_elemt::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {

    static auto op = create_batch_norm_elemt_typed_handle();
    return op.call(input, weight, bias, mean, invstd, eps);
}

// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
at::Tensor batch_norm_elemt::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {

    static auto op = create_batch_norm_elemt_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_elemt_out, name, "aten::batch_norm_elemt")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_elemt_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_elemt_out, schema_str, "batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)")

// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_elemt_out::schema> create_batch_norm_elemt_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_elemt_out::name, batch_norm_elemt_out::overload_name)
      .typed<batch_norm_elemt_out::schema>();
}

// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & batch_norm_elemt_out::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) {

    static auto op = create_batch_norm_elemt_out_typed_handle();
    return op.call(input, weight, bias, mean, invstd, eps, out);
}

// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & batch_norm_elemt_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) {

    static auto op = create_batch_norm_elemt_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cdist, name, "aten::cdist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cdist, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cdist, schema_str, "cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor")

// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cdist::schema> create_cdist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cdist::name, cdist::overload_name)
      .typed<cdist::schema>();
}

// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
at::Tensor cdist::call(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {

    static auto op = create_cdist_typed_handle();
    return op.call(x1, x2, p, compute_mode);
}

// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
at::Tensor cdist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {

    static auto op = create_cdist_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, p, compute_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mT, name, "aten::mT")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mT, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mT, schema_str, "mT(Tensor(a) self) -> Tensor(a)")

// aten::mT(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<mT::schema> create_mT_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mT::name, mT::overload_name)
      .typed<mT::schema>();
}

// aten::mT(Tensor(a) self) -> Tensor(a)
at::Tensor mT::call(const at::Tensor & self) {

    static auto op = create_mT_typed_handle();
    return op.call(self);
}

// aten::mT(Tensor(a) self) -> Tensor(a)
at::Tensor mT::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_mT_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adjoint, name, "aten::adjoint")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adjoint, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adjoint, schema_str, "adjoint(Tensor(a) self) -> Tensor(a)")

// aten::adjoint(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<adjoint::schema> create_adjoint_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adjoint::name, adjoint::overload_name)
      .typed<adjoint::schema>();
}

// aten::adjoint(Tensor(a) self) -> Tensor(a)
at::Tensor adjoint::call(const at::Tensor & self) {

    static auto op = create_adjoint_typed_handle();
    return op.call(self);
}

// aten::adjoint(Tensor(a) self) -> Tensor(a)
at::Tensor adjoint::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_adjoint_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(channel_shuffle, name, "aten::channel_shuffle")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(channel_shuffle, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(channel_shuffle, schema_str, "channel_shuffle(Tensor self, int groups) -> Tensor")

// aten::channel_shuffle(Tensor self, int groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<channel_shuffle::schema> create_channel_shuffle_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(channel_shuffle::name, channel_shuffle::overload_name)
      .typed<channel_shuffle::schema>();
}

// aten::channel_shuffle(Tensor self, int groups) -> Tensor
at::Tensor channel_shuffle::call(const at::Tensor & self, int64_t groups) {

    static auto op = create_channel_shuffle_typed_handle();
    return op.call(self, groups);
}

// aten::channel_shuffle(Tensor self, int groups) -> Tensor
at::Tensor channel_shuffle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups) {

    static auto op = create_channel_shuffle_typed_handle();
    return op.redispatch(dispatchKeySet, self, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson_nll_loss, name, "aten::poisson_nll_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson_nll_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson_nll_loss, schema_str, "poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor")

// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<poisson_nll_loss::schema> create_poisson_nll_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(poisson_nll_loss::name, poisson_nll_loss::overload_name)
      .typed<poisson_nll_loss::schema>();
}

// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
at::Tensor poisson_nll_loss::call(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {

    static auto op = create_poisson_nll_loss_typed_handle();
    return op.call(input, target, log_input, full, eps, reduction);
}

// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
at::Tensor poisson_nll_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {

    static auto op = create_poisson_nll_loss_typed_handle();
    return op.redispatch(dispatchKeySet, input, target, log_input, full, eps, reduction);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad, name, "aten::deg2rad")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad, schema_str, "deg2rad(Tensor self) -> Tensor")

// aten::deg2rad(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<deg2rad::schema> create_deg2rad_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(deg2rad::name, deg2rad::overload_name)
      .typed<deg2rad::schema>();
}

// aten::deg2rad(Tensor self) -> Tensor
at::Tensor deg2rad::call(const at::Tensor & self) {

    static auto op = create_deg2rad_typed_handle();
    return op.call(self);
}

// aten::deg2rad(Tensor self) -> Tensor
at::Tensor deg2rad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_deg2rad_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad_, name, "aten::deg2rad_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad_, schema_str, "deg2rad_(Tensor(a!) self) -> Tensor(a!)")

// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<deg2rad_::schema> create_deg2rad__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(deg2rad_::name, deg2rad_::overload_name)
      .typed<deg2rad_::schema>();
}

// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & deg2rad_::call(at::Tensor & self) {

    static auto op = create_deg2rad__typed_handle();
    return op.call(self);
}

// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & deg2rad_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_deg2rad__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad_out, name, "aten::deg2rad")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(deg2rad_out, schema_str, "deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<deg2rad_out::schema> create_deg2rad_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(deg2rad_out::name, deg2rad_out::overload_name)
      .typed<deg2rad_out::schema>();
}

// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & deg2rad_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_deg2rad_out_typed_handle();
    return op.call(self, out);
}

// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & deg2rad_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_deg2rad_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm, name, "aten::randperm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm, schema_str, "randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randperm::schema> create_randperm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randperm::name, randperm::overload_name)
      .typed<randperm::schema>();
}

// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randperm::call(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randperm_typed_handle();
    return op.call(n, dtype, layout, device, pin_memory);
}

// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randperm::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randperm_typed_handle();
    return op.redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_generator, name, "aten::randperm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_generator, overload_name, "generator")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_generator, schema_str, "randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randperm_generator::schema> create_randperm_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randperm_generator::name, randperm_generator::overload_name)
      .typed<randperm_generator::schema>();
}

// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randperm_generator::call(int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randperm_generator_typed_handle();
    return op.call(n, generator, dtype, layout, device, pin_memory);
}

// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randperm_generator::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randperm_generator_typed_handle();
    return op.redispatch(dispatchKeySet, n, generator, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_out, name, "aten::randperm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_out, schema_str, "randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randperm_out::schema> create_randperm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randperm_out::name, randperm_out::overload_name)
      .typed<randperm_out::schema>();
}

// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randperm_out::call(int64_t n, at::Tensor & out) {

    static auto op = create_randperm_out_typed_handle();
    return op.call(n, out);
}

// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randperm_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, at::Tensor & out) {

    static auto op = create_randperm_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_generator_out, name, "aten::randperm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_generator_out, overload_name, "generator_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randperm_generator_out, schema_str, "randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)")

// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randperm_generator_out::schema> create_randperm_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randperm_generator_out::name, randperm_generator_out::overload_name)
      .typed<randperm_generator_out::schema>();
}

// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randperm_generator_out::call(int64_t n, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_randperm_generator_out_typed_handle();
    return op.call(n, generator, out);
}

// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randperm_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_randperm_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, generator, out);
}

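// [Illustrative sketch, not generated code] Factory ops such as randperm take
// the four unpacked TensorOptions fields as separate optionals. The sketch
// below passes the schema's default dtype (long) explicitly and leaves the
// remaining fields unset; the function name is hypothetical:
namespace {
[[maybe_unused]] at::Tensor example_randperm_default_options(int64_t n) {
  return randperm::call(n,
      /*dtype=*/at::ScalarType::Long,  // matches the schema default dtype=long
      /*layout=*/c10::nullopt,
      /*device=*/c10::nullopt,
      /*pin_memory=*/c10::nullopt);
}
} // namespace
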
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative, name, "aten::negative")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative, schema_str, "negative(Tensor self) -> Tensor")

// aten::negative(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<negative::schema> create_negative_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(negative::name, negative::overload_name)
      .typed<negative::schema>();
}

// aten::negative(Tensor self) -> Tensor
at::Tensor negative::call(const at::Tensor & self) {

    static auto op = create_negative_typed_handle();
    return op.call(self);
}

// aten::negative(Tensor self) -> Tensor
at::Tensor negative::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_negative_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative_, name, "aten::negative_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative_, schema_str, "negative_(Tensor(a!) self) -> Tensor(a!)")

// aten::negative_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<negative_::schema> create_negative__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(negative_::name, negative_::overload_name)
      .typed<negative_::schema>();
}

// aten::negative_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & negative_::call(at::Tensor & self) {

    static auto op = create_negative__typed_handle();
    return op.call(self);
}

// aten::negative_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & negative_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_negative__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative_out, name, "aten::negative")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(negative_out, schema_str, "negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<negative_out::schema> create_negative_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(negative_out::name, negative_out::overload_name)
      .typed<negative_out::schema>();
}

// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & negative_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_negative_out_typed_handle();
    return op.call(self, out);
}

// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & negative_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_negative_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_copy, name, "aten::_reshape_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_copy, schema_str, "_reshape_copy(Tensor self, SymInt[] size) -> Tensor")

// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_copy::schema> create__reshape_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_reshape_copy::name, _reshape_copy::overload_name)
      .typed<_reshape_copy::schema>();
}

// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor
at::Tensor _reshape_copy::call(const at::Tensor & self, c10::SymIntArrayRef size) {

    static auto op = create__reshape_copy_typed_handle();
    return op.call(self, size);
}

// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor
at::Tensor _reshape_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {

    static auto op = create__reshape_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu, name, "aten::relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu, schema_str, "relu(Tensor self) -> Tensor")

// aten::relu(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<relu::schema> create_relu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu::name, relu::overload_name)
      .typed<relu::schema>();
}

// aten::relu(Tensor self) -> Tensor
at::Tensor relu::call(const at::Tensor & self) {

    static auto op = create_relu_typed_handle();
    return op.call(self);
}

// aten::relu(Tensor self) -> Tensor
at::Tensor relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu_, name, "aten::relu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu_, schema_str, "relu_(Tensor(a!) self) -> Tensor(a!)")

// aten::relu_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<relu_::schema> create_relu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu_::name, relu_::overload_name)
      .typed<relu_::schema>();
}

// aten::relu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & relu_::call(at::Tensor & self) {

    static auto op = create_relu__typed_handle();
    return op.call(self);
}

// aten::relu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & relu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_relu__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(infinitely_differentiable_gelu_backward, name, "aten::infinitely_differentiable_gelu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(infinitely_differentiable_gelu_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(infinitely_differentiable_gelu_backward, schema_str, "infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor")

// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<infinitely_differentiable_gelu_backward::schema> create_infinitely_differentiable_gelu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(infinitely_differentiable_gelu_backward::name, infinitely_differentiable_gelu_backward::overload_name)
      .typed<infinitely_differentiable_gelu_backward::schema>();
}

// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
at::Tensor infinitely_differentiable_gelu_backward::call(const at::Tensor & grad, const at::Tensor & self) {

    static auto op = create_infinitely_differentiable_gelu_backward_typed_handle();
    return op.call(grad, self);
}

// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
at::Tensor infinitely_differentiable_gelu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self) {

    static auto op = create_infinitely_differentiable_gelu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_backward_grad_input, name, "aten::hardshrink_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_backward_grad_input, schema_str, "hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardshrink_backward_grad_input::schema> create_hardshrink_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardshrink_backward_grad_input::name, hardshrink_backward_grad_input::overload_name)
      .typed<hardshrink_backward_grad_input::schema>();
}

// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & hardshrink_backward_grad_input::call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {

    static auto op = create_hardshrink_backward_grad_input_typed_handle();
    return op.call(grad_out, self, lambd, grad_input);
}

// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & hardshrink_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {

    static auto op = create_hardshrink_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, self, lambd, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_backward, name, "aten::hardshrink_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_backward, schema_str, "hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor")

// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hardshrink_backward::schema> create_hardshrink_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardshrink_backward::name, hardshrink_backward::overload_name)
      .typed<hardshrink_backward::schema>();
}

// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
at::Tensor hardshrink_backward::call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {

    static auto op = create_hardshrink_backward_typed_handle();
    return op.call(grad_out, self, lambd);
}

// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
at::Tensor hardshrink_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {

    static auto op = create_hardshrink_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, self, lambd);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc, name, "aten::sinc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc, schema_str, "sinc(Tensor self) -> Tensor")

// aten::sinc(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sinc::schema> create_sinc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinc::name, sinc::overload_name)
      .typed<sinc::schema>();
}

// aten::sinc(Tensor self) -> Tensor
at::Tensor sinc::call(const at::Tensor & self) {

    static auto op = create_sinc_typed_handle();
    return op.call(self);
}

// aten::sinc(Tensor self) -> Tensor
at::Tensor sinc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_sinc_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc_, name, "aten::sinc_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc_, schema_str, "sinc_(Tensor(a!) self) -> Tensor(a!)")

// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sinc_::schema> create_sinc__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinc_::name, sinc_::overload_name)
      .typed<sinc_::schema>();
}

// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sinc_::call(at::Tensor & self) {

    static auto op = create_sinc__typed_handle();
    return op.call(self);
}

// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sinc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_sinc__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc_out, name, "aten::sinc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinc_out, schema_str, "sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sinc_out::schema> create_sinc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinc_out::name, sinc_out::overload_name)
      .typed<sinc_out::schema>();
}

// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sinc_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sinc_out_typed_handle();
    return op.call(self, out);
}

// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sinc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sinc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_Tensor, name, "aten::slice")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_Tensor, schema_str, "slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)")

// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<slice_Tensor::schema> create_slice_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_Tensor::name, slice_Tensor::overload_name)
      .typed<slice_Tensor::schema>();
}

// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
at::Tensor slice_Tensor::call(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {

    static auto op = create_slice_Tensor_typed_handle();
    return op.call(self, dim, start, end, step);
}

// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
at::Tensor slice_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {

    static auto op = create_slice_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, end, step);
}

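// The slice.Tensor wrapper above takes c10::optional<c10::SymInt> for
// start/end so symbolic sizes survive tracing; plain integers convert
// implicitly. A minimal sketch, with hypothetical values:
//
//   at::Tensor t = at::arange(10);
//   // Views every second element of t[2:8]; resolves to slice_Tensor::call.
//   at::Tensor s = at::slice(t, /*dim=*/0, /*start=*/2, /*end=*/8, /*step=*/2);
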
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_scatter, name, "aten::select_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_scatter, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_scatter, schema_str, "select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor")

// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<select_scatter::schema> create_select_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_scatter::name, select_scatter::overload_name)
      .typed<select_scatter::schema>();
}

// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
at::Tensor select_scatter::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {

    static auto op = create_select_scatter_typed_handle();
    return op.call(self, src, dim, index);
}

// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
at::Tensor select_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {

    static auto op = create_select_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, dim, index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smm, name, "aten::smm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smm, schema_str, "smm(Tensor self, Tensor mat2) -> Tensor")

// aten::smm(Tensor self, Tensor mat2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<smm::schema> create_smm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smm::name, smm::overload_name)
      .typed<smm::schema>();
}

// aten::smm(Tensor self, Tensor mat2) -> Tensor
at::Tensor smm::call(const at::Tensor & self, const at::Tensor & mat2) {

    static auto op = create_smm_typed_handle();
    return op.call(self, mat2);
}

// aten::smm(Tensor self, Tensor mat2) -> Tensor
at::Tensor smm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {

    static auto op = create_smm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_with_sizes, name, "aten::unsafe_split_with_sizes")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_with_sizes, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_with_sizes, schema_str, "unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]")

// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_split_with_sizes::schema> create_unsafe_split_with_sizes_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_split_with_sizes::name, unsafe_split_with_sizes::overload_name)
      .typed<unsafe_split_with_sizes::schema>();
}

// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_split_with_sizes::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {

    static auto op = create_unsafe_split_with_sizes_typed_handle();
    return op.call(self, split_sizes, dim);
}

// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_split_with_sizes::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {

    static auto op = create_unsafe_split_with_sizes_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_sizes, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dstack, name, "aten::dstack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dstack, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dstack, schema_str, "dstack(Tensor[] tensors) -> Tensor")

// aten::dstack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<dstack::schema> create_dstack_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dstack::name, dstack::overload_name)
      .typed<dstack::schema>();
}

// aten::dstack(Tensor[] tensors) -> Tensor
at::Tensor dstack::call(at::TensorList tensors) {

    static auto op = create_dstack_typed_handle();
    return op.call(tensors);
}

// aten::dstack(Tensor[] tensors) -> Tensor
at::Tensor dstack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {

    static auto op = create_dstack_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dstack_out, name, "aten::dstack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dstack_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dstack_out, schema_str, "dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)")

// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<dstack_out::schema> create_dstack_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dstack_out::name, dstack_out::overload_name)
      .typed<dstack_out::schema>();
}

// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dstack_out::call(at::TensorList tensors, at::Tensor & out) {

    static auto op = create_dstack_out_typed_handle();
    return op.call(tensors, out);
}

// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dstack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {

    static auto op = create_dstack_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod, name, "aten::prod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod, schema_str, "prod(Tensor self, *, ScalarType? dtype=None) -> Tensor")

// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<prod::schema> create_prod_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod::name, prod::overload_name)
      .typed<prod::schema>();
}

// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {

    static auto op = create_prod_typed_handle();
    return op.call(self, dtype);
}

// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype) {

    static auto op = create_prod_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_dim_int, name, "aten::prod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_dim_int, overload_name, "dim_int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_dim_int, schema_str, "prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")

// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<prod_dim_int::schema> create_prod_dim_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_dim_int::name, prod_dim_int::overload_name)
      .typed<prod_dim_int::schema>();
}

// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod_dim_int::call(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_prod_dim_int_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod_dim_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_prod_dim_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_int_out, name, "aten::prod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_int_out, overload_name, "int_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_int_out, schema_str, "prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<prod_int_out::schema> create_prod_int_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_int_out::name, prod_int_out::overload_name)
      .typed<prod_int_out::schema>();
}

// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_int_out::call(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_prod_int_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_prod_int_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

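// Note the out-overload convention visible in prod.int_out above: the
// mutable `out` argument is keyword-only and last in the schema, appended
// after the functional arguments in the C++ `call` signature, and returned
// by reference. A hedged sketch with hypothetical tensors:
//
//   at::Tensor x = at::rand({3, 4});
//   at::Tensor out = at::empty({3});
//   at::prod_out(out, x, /*dim=*/1);  // resolves to prod_int_out::call
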
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_dim_Dimname, name, "aten::prod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_dim_Dimname, overload_name, "dim_Dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_dim_Dimname, schema_str, "prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")

// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<prod_dim_Dimname::schema> create_prod_dim_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_dim_Dimname::name, prod_dim_Dimname::overload_name)
      .typed<prod_dim_Dimname::schema>();
}

// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod_dim_Dimname::call(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_prod_dim_Dimname_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod_dim_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_prod_dim_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_Dimname_out, name, "aten::prod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_Dimname_out, overload_name, "Dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_Dimname_out, schema_str, "prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<prod_Dimname_out::schema> create_prod_Dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_Dimname_out::name, prod_Dimname_out::overload_name)
      .typed<prod_Dimname_out::schema>();
}

// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_Dimname_out::call(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_prod_Dimname_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_Dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_prod_Dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan, name, "aten::tan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan, schema_str, "tan(Tensor self) -> Tensor")

// aten::tan(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tan::schema> create_tan_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tan::name, tan::overload_name)
      .typed<tan::schema>();
}

// aten::tan(Tensor self) -> Tensor
at::Tensor tan::call(const at::Tensor & self) {

    static auto op = create_tan_typed_handle();
    return op.call(self);
}

// aten::tan(Tensor self) -> Tensor
at::Tensor tan::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_tan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan_, name, "aten::tan_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan_, schema_str, "tan_(Tensor(a!) self) -> Tensor(a!)")

// aten::tan_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tan_::schema> create_tan__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tan_::name, tan_::overload_name)
      .typed<tan_::schema>();
}

// aten::tan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & tan_::call(at::Tensor & self) {

    static auto op = create_tan__typed_handle();
    return op.call(self);
}

// aten::tan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & tan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_tan__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan_out, name, "aten::tan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tan_out, schema_str, "tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tan_out::schema> create_tan_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tan_out::name, tan_out::overload_name)
      .typed<tan_out::schema>();
}

// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tan_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_tan_out_typed_handle();
    return op.call(self, out);
}

// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tan_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_tan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trapezoid_x, name, "aten::trapezoid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trapezoid_x, overload_name, "x")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trapezoid_x, schema_str, "trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor")

// aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<trapezoid_x::schema> create_trapezoid_x_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(trapezoid_x::name, trapezoid_x::overload_name)
      .typed<trapezoid_x::schema>();
}

// aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
at::Tensor trapezoid_x::call(const at::Tensor & y, const at::Tensor & x, int64_t dim) {

    static auto op = create_trapezoid_x_typed_handle();
    return op.call(y, x, dim);
}

// aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
at::Tensor trapezoid_x::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim) {

    static auto op = create_trapezoid_x_typed_handle();
    return op.redispatch(dispatchKeySet, y, x, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trapezoid_dx, name, "aten::trapezoid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trapezoid_dx, overload_name, "dx")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trapezoid_dx, schema_str, "trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor")

// aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<trapezoid_dx::schema> create_trapezoid_dx_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(trapezoid_dx::name, trapezoid_dx::overload_name)
      .typed<trapezoid_dx::schema>();
}

// aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
at::Tensor trapezoid_dx::call(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {

    static auto op = create_trapezoid_dx_typed_handle();
    return op.call(y, dx, dim);
}

// aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
at::Tensor trapezoid_dx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx, int64_t dim) {

    static auto op = create_trapezoid_dx_typed_handle();
    return op.redispatch(dispatchKeySet, y, dx, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask, name, "aten::_nested_tensor_from_mask")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask, schema_str, "_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor")

// aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_mask::schema> create__nested_tensor_from_mask_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_from_mask::name, _nested_tensor_from_mask::overload_name)
      .typed<_nested_tensor_from_mask::schema>();
}

// aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
at::Tensor _nested_tensor_from_mask::call(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {

    static auto op = create__nested_tensor_from_mask_typed_handle();
    return op.call(t, mask, mask_check);
}

// aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
at::Tensor _nested_tensor_from_mask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check) {

    static auto op = create__nested_tensor_from_mask_typed_handle();
    return op.redispatch(dispatchKeySet, t, mask, mask_check);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask_left_aligned, name, "aten::_nested_tensor_from_mask_left_aligned")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask_left_aligned, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask_left_aligned, schema_str, "_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool")

// aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_mask_left_aligned::schema> create__nested_tensor_from_mask_left_aligned_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_from_mask_left_aligned::name, _nested_tensor_from_mask_left_aligned::overload_name)
      .typed<_nested_tensor_from_mask_left_aligned::schema>();
}

// aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
bool _nested_tensor_from_mask_left_aligned::call(const at::Tensor & t, const at::Tensor & mask) {

    static auto op = create__nested_tensor_from_mask_left_aligned_typed_handle();
    return op.call(t, mask);
}

// aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
bool _nested_tensor_from_mask_left_aligned::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask) {

    static auto op = create__nested_tensor_from_mask_left_aligned_typed_handle();
    return op.redispatch(dispatchKeySet, t, mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_size, name, "aten::_nested_tensor_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_size, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_size, schema_str, "_nested_tensor_size(Tensor self) -> Tensor")

// aten::_nested_tensor_size(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_size::schema> create__nested_tensor_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_size::name, _nested_tensor_size::overload_name)
      .typed<_nested_tensor_size::schema>();
}

// aten::_nested_tensor_size(Tensor self) -> Tensor
at::Tensor _nested_tensor_size::call(const at::Tensor & self) {

    static auto op = create__nested_tensor_size_typed_handle();
    return op.call(self);
}

// aten::_nested_tensor_size(Tensor self) -> Tensor
at::Tensor _nested_tensor_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__nested_tensor_size_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer_copy, name, "aten::_nested_view_from_buffer_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer_copy, schema_str, "_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor")

// aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_buffer_copy::schema> create__nested_view_from_buffer_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_view_from_buffer_copy::name, _nested_view_from_buffer_copy::overload_name)
      .typed<_nested_view_from_buffer_copy::schema>();
}

// aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
at::Tensor _nested_view_from_buffer_copy::call(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {

    static auto op = create__nested_view_from_buffer_copy_typed_handle();
    return op.call(self, nested_size, nested_strides, offsets);
}

// aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
at::Tensor _nested_view_from_buffer_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {

    static auto op = create__nested_view_from_buffer_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_consecutive, name, "aten::unique_dim_consecutive")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_consecutive, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_consecutive, schema_str, "unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)")

// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<unique_dim_consecutive::schema> create_unique_dim_consecutive_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unique_dim_consecutive::name, unique_dim_consecutive::overload_name)
      .typed<unique_dim_consecutive::schema>();
}

// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive::call(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {

    static auto op = create_unique_dim_consecutive_typed_handle();
    return op.call(self, dim, return_inverse, return_counts);
}

// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {

    static auto op = create_unique_dim_consecutive_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, return_inverse, return_counts);
}

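// Operators that return several tensors, such as unique_dim_consecutive
// above, surface as ::std::tuple and unpack cleanly with C++17 structured
// bindings. A minimal sketch with a hypothetical input:
//
//   at::Tensor x = at::tensor({1, 1, 2, 2, 3});
//   auto [values, inverse, counts] = at::unique_dim_consecutive(
//       x, /*dim=*/0, /*return_inverse=*/true, /*return_counts=*/true);
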
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unsafe_view, name, "aten::_unsafe_view")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unsafe_view, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unsafe_view, schema_str, "_unsafe_view(Tensor self, SymInt[] size) -> Tensor")

// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_view::schema> create__unsafe_view_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unsafe_view::name, _unsafe_view::overload_name)
      .typed<_unsafe_view::schema>();
}

// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
at::Tensor _unsafe_view::call(const at::Tensor & self, c10::SymIntArrayRef size) {

    static auto op = create__unsafe_view_typed_handle();
    return op.call(self, size);
}

// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
at::Tensor _unsafe_view::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {

    static auto op = create__unsafe_view_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze, name, "aten::unsqueeze")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze, schema_str, "unsqueeze(Tensor(a) self, int dim) -> Tensor(a)")

// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<unsqueeze::schema> create_unsqueeze_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsqueeze::name, unsqueeze::overload_name)
      .typed<unsqueeze::schema>();
}

// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
at::Tensor unsqueeze::call(const at::Tensor & self, int64_t dim) {

    static auto op = create_unsqueeze_typed_handle();
    return op.call(self, dim);
}

// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
at::Tensor unsqueeze::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create_unsqueeze_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_, name, "aten::unsqueeze_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_, schema_str, "unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)")

// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<unsqueeze_::schema> create_unsqueeze__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsqueeze_::name, unsqueeze_::overload_name)
      .typed<unsqueeze_::schema>();
}

// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
at::Tensor & unsqueeze_::call(at::Tensor & self, int64_t dim) {

    static auto op = create_unsqueeze__typed_handle();
    return op.call(self, dim);
}

// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
at::Tensor & unsqueeze_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) {

    static auto op = create_unsqueeze__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficientzerotensor, name, "aten::_efficientzerotensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficientzerotensor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficientzerotensor, schema_str, "_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_efficientzerotensor::schema> create__efficientzerotensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_efficientzerotensor::name, _efficientzerotensor::overload_name)
      .typed<_efficientzerotensor::schema>();
}

// aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _efficientzerotensor::call(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create__efficientzerotensor_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _efficientzerotensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create__efficientzerotensor_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

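// Factory operators such as _efficientzerotensor above are flattened at the
// dispatcher boundary into four optionals (dtype, layout, device,
// pin_memory); the user-facing ATen wrapper accepts a single TensorOptions
// that it unpacks into these arguments. A sketch, assuming the usual
// TensorOptions-based wrapper (shape and options are hypothetical):
//
//   at::Tensor z = at::_efficientzerotensor(
//       {2, 3}, at::TensorOptions().dtype(at::kFloat).device(at::kCPU));
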
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson, name, "aten::poisson")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson, schema_str, "poisson(Tensor self, Generator? generator=None) -> Tensor")

// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<poisson::schema> create_poisson_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(poisson::name, poisson::overload_name)
      .typed<poisson::schema>();
}

// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
at::Tensor poisson::call(const at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create_poisson_typed_handle();
    return op.call(self, generator);
}

// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
at::Tensor poisson::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create_poisson_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_out, name, "aten::sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_out, schema_str, "sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sub_out::schema> create_sub_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sub_out::name, sub_out::overload_name)
      .typed<sub_out::schema>();
}

// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sub_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_sub_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sub_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_sub_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Tensor, name, "aten::sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Tensor, schema_str, "sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor")

// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sub_Tensor::schema> create_sub_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sub_Tensor::name, sub_Tensor::overload_name)
      .typed<sub_Tensor::schema>();
}

// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor sub_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create_sub_Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor sub_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create_sub_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub__Tensor, name, "aten::sub_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub__Tensor, schema_str, "sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)")

// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sub__Tensor::schema> create_sub__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sub__Tensor::name, sub__Tensor::overload_name)
      .typed<sub__Tensor::schema>();
}

// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & sub__Tensor::call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create_sub__Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & sub__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create_sub__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Scalar, name, "aten::sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Scalar, schema_str, "sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor")

// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sub_Scalar::schema> create_sub_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sub_Scalar::name, sub_Scalar::overload_name)
      .typed<sub_Scalar::schema>();
}

// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor sub_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {

    static auto op = create_sub_Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor sub_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {

    static auto op = create_sub_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub__Scalar, name, "aten::sub_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub__Scalar, schema_str, "sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)")

// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sub__Scalar::schema> create_sub__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sub__Scalar::name, sub__Scalar::overload_name)
      .typed<sub__Scalar::schema>();
}

// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & sub__Scalar::call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {

    static auto op = create_sub__Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & sub__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {

    static auto op = create_sub__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

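// For the sub/sub_ family above, `alpha` scales the second operand before
// the subtraction, i.e. result = self - alpha * other. A minimal sketch
// with hypothetical operands:
//
//   at::Tensor a = at::ones({2, 2});
//   at::Tensor b = at::ones({2, 2});
//   at::Tensor c = at::sub(a, b, /*alpha=*/2);  // every element: 1 - 2*1 = -1
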
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_out, name, "aten::subtract")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_out, schema_str, "subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<subtract_out::schema> create_subtract_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(subtract_out::name, subtract_out::overload_name)
      .typed<subtract_out::schema>();
}

// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & subtract_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_subtract_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & subtract_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_subtract_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_Tensor, name, "aten::subtract")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_Tensor, schema_str, "subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor")

// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<subtract_Tensor::schema> create_subtract_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(subtract_Tensor::name, subtract_Tensor::overload_name)
      .typed<subtract_Tensor::schema>();
}

// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor subtract_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create_subtract_Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor subtract_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create_subtract_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract__Tensor, name, "aten::subtract_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract__Tensor, schema_str, "subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)")

// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<subtract__Tensor::schema> create_subtract__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(subtract__Tensor::name, subtract__Tensor::overload_name)
      .typed<subtract__Tensor::schema>();
}

// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & subtract__Tensor::call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create_subtract__Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & subtract__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create_subtract__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_Scalar, name, "aten::subtract")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract_Scalar, schema_str, "subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor")
5073
5074// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
5075static C10_NOINLINE c10::TypedOperatorHandle<subtract_Scalar::schema> create_subtract_Scalar_typed_handle() {
5076 return c10::Dispatcher::singleton()
5077 .findSchemaOrThrow(subtract_Scalar::name, subtract_Scalar::overload_name)
5078 .typed<subtract_Scalar::schema>();
5079}
5080
5081// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
5082at::Tensor subtract_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
5083
5084 static auto op = create_subtract_Scalar_typed_handle();
5085 return op.call(self, other, alpha);
5086}
5087
5088// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
5089at::Tensor subtract_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
5090
5091 static auto op = create_subtract_Scalar_typed_handle();
5092 return op.redispatch(dispatchKeySet, self, other, alpha);
5093}
5094
5095STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract__Scalar, name, "aten::subtract_")
5096STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract__Scalar, overload_name, "Scalar")
5097STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(subtract__Scalar, schema_str, "subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)")
5098
5099// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
5100static C10_NOINLINE c10::TypedOperatorHandle<subtract__Scalar::schema> create_subtract__Scalar_typed_handle() {
5101 return c10::Dispatcher::singleton()
5102 .findSchemaOrThrow(subtract__Scalar::name, subtract__Scalar::overload_name)
5103 .typed<subtract__Scalar::schema>();
5104}
5105
5106// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
5107at::Tensor & subtract__Scalar::call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
5108
5109 static auto op = create_subtract__Scalar_typed_handle();
5110 return op.call(self, other, alpha);
5111}
5112
5113// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
5114at::Tensor & subtract__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
5115
5116 static auto op = create_subtract__Scalar_typed_handle();
5117 return op.redispatch(dispatchKeySet, self, other, alpha);
5118}
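
// Illustrative usage (hand-added; not generated). aten::subtract is the
// NumPy-flavored alias of aten::sub, so these wrappers mirror the sub family:
//
//   at::Tensor a = at::rand({2, 2});
//   at::Tensor b = at::rand({2, 2});
//   at::Tensor c = at::subtract(a, b, /*alpha=*/2);  // subtract.Tensor
//   a.subtract_(b);                                  // subtract_.Tensor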

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside_out, name, "aten::heaviside")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside_out, schema_str, "heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)")

// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<heaviside_out::schema> create_heaviside_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(heaviside_out::name, heaviside_out::overload_name)
      .typed<heaviside_out::schema>();
}

// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & heaviside_out::call(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {

    static auto op = create_heaviside_out_typed_handle();
    return op.call(self, values, out);
}

// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & heaviside_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {

    static auto op = create_heaviside_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, values, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside, name, "aten::heaviside")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside, schema_str, "heaviside(Tensor self, Tensor values) -> Tensor")

// aten::heaviside(Tensor self, Tensor values) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<heaviside::schema> create_heaviside_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(heaviside::name, heaviside::overload_name)
      .typed<heaviside::schema>();
}

// aten::heaviside(Tensor self, Tensor values) -> Tensor
at::Tensor heaviside::call(const at::Tensor & self, const at::Tensor & values) {

    static auto op = create_heaviside_typed_handle();
    return op.call(self, values);
}

// aten::heaviside(Tensor self, Tensor values) -> Tensor
at::Tensor heaviside::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values) {

    static auto op = create_heaviside_typed_handle();
    return op.redispatch(dispatchKeySet, self, values);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside_, name, "aten::heaviside_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(heaviside_, schema_str, "heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)")

// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<heaviside_::schema> create_heaviside__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(heaviside_::name, heaviside_::overload_name)
      .typed<heaviside_::schema>();
}

// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
at::Tensor & heaviside_::call(at::Tensor & self, const at::Tensor & values) {

    static auto op = create_heaviside__typed_handle();
    return op.call(self, values);
}

// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
at::Tensor & heaviside_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & values) {

    static auto op = create_heaviside__typed_handle();
    return op.redispatch(dispatchKeySet, self, values);
}
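
// Illustrative usage (hand-added; not generated):
//
//   at::Tensor x = at::arange(-1, 2).to(at::kFloat);     // {-1, 0, 1}
//   at::Tensor h = at::heaviside(x, at::zeros_like(x));  // {0, 0, 1}
//   x.heaviside_(at::ones_like(x));                      // in-place variant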

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_addmm_activation_out, name, "aten::_addmm_activation")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_addmm_activation_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_addmm_activation_out, schema_str, "_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)")

// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_addmm_activation_out::schema> create__addmm_activation_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_addmm_activation_out::name, _addmm_activation_out::overload_name)
      .typed<_addmm_activation_out::schema>();
}

// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _addmm_activation_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {

    static auto op = create__addmm_activation_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, use_gelu, out);
}

// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _addmm_activation_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {

    static auto op = create__addmm_activation_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_addmm_activation, name, "aten::_addmm_activation")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_addmm_activation, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_addmm_activation, schema_str, "_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor")

// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_addmm_activation::schema> create__addmm_activation_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_addmm_activation::name, _addmm_activation::overload_name)
      .typed<_addmm_activation::schema>();
}

// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
at::Tensor _addmm_activation::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {

    static auto op = create__addmm_activation_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, use_gelu);
}

// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
at::Tensor _addmm_activation::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {

    static auto op = create__addmm_activation_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu);
}
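
// Illustrative usage (hand-added; not generated). _addmm_activation is an
// internal fused epilogue op: relu(beta*self + alpha*mat1@mat2), or GELU when
// use_gelu=true; this sketch assumes an internal caller:
//
//   at::Tensor bias = at::rand({4});
//   at::Tensor m1 = at::rand({3, 5});
//   at::Tensor m2 = at::rand({5, 4});
//   at::Tensor y = at::_addmm_activation(bias, m1, m2, /*beta=*/1,
//                                        /*alpha=*/1, /*use_gelu=*/false);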

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_compressed_tensor_comp_plain_value_size, name, "aten::sparse_compressed_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_compressed_tensor_comp_plain_value_size, overload_name, "comp_plain_value_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_compressed_tensor_comp_plain_value_size, schema_str, "sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_compressed_tensor_comp_plain_value_size::schema> create_sparse_compressed_tensor_comp_plain_value_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_compressed_tensor_comp_plain_value_size::name, sparse_compressed_tensor_comp_plain_value_size::overload_name)
      .typed<sparse_compressed_tensor_comp_plain_value_size::schema>();
}

// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_compressed_tensor_comp_plain_value_size::call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_compressed_tensor_comp_plain_value_size_typed_handle();
    return op.call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_compressed_tensor_comp_plain_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_compressed_tensor_comp_plain_value_size_typed_handle();
    return op.redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsr_tensor_crow_col_value_size, name, "aten::sparse_bsr_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsr_tensor_crow_col_value_size, overload_name, "crow_col_value_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsr_tensor_crow_col_value_size, schema_str, "sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_bsr_tensor_crow_col_value_size::schema> create_sparse_bsr_tensor_crow_col_value_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_bsr_tensor_crow_col_value_size::name, sparse_bsr_tensor_crow_col_value_size::overload_name)
      .typed<sparse_bsr_tensor_crow_col_value_size::schema>();
}

// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_bsr_tensor_crow_col_value_size::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_bsr_tensor_crow_col_value_size_typed_handle();
    return op.call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_bsr_tensor_crow_col_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_bsr_tensor_crow_col_value_size_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_compressed_tensor_comp_plain_value, name, "aten::sparse_compressed_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_compressed_tensor_comp_plain_value, overload_name, "comp_plain_value")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_compressed_tensor_comp_plain_value, schema_str, "sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_compressed_tensor_comp_plain_value::schema> create_sparse_compressed_tensor_comp_plain_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_compressed_tensor_comp_plain_value::name, sparse_compressed_tensor_comp_plain_value::overload_name)
      .typed<sparse_compressed_tensor_comp_plain_value::schema>();
}

// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_compressed_tensor_comp_plain_value::call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_compressed_tensor_comp_plain_value_typed_handle();
    return op.call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_compressed_tensor_comp_plain_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_compressed_tensor_comp_plain_value_typed_handle();
    return op.redispatch(dispatchKeySet, compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsr_tensor_crow_col_value, name, "aten::sparse_bsr_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsr_tensor_crow_col_value, overload_name, "crow_col_value")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsr_tensor_crow_col_value, schema_str, "sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_bsr_tensor_crow_col_value::schema> create_sparse_bsr_tensor_crow_col_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_bsr_tensor_crow_col_value::name, sparse_bsr_tensor_crow_col_value::overload_name)
      .typed<sparse_bsr_tensor_crow_col_value::schema>();
}

// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_bsr_tensor_crow_col_value::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_bsr_tensor_crow_col_value_typed_handle();
    return op.call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_bsr_tensor_crow_col_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_bsr_tensor_crow_col_value_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
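
// Illustrative usage (hand-added; not generated). A 2x2 CSR tensor built via
// the comp_plain_value_size entry point; the sparse_bsr_tensor wrappers take
// the same arguments with a blocked (BSR) layout:
//
//   at::Tensor crow = at::tensor({0, 1, 2}, at::kLong);
//   at::Tensor col  = at::tensor({0, 1}, at::kLong);
//   at::Tensor vals = at::tensor({1.0, 2.0});
//   at::Tensor csr = at::sparse_compressed_tensor(
//       crow, col, vals, {2, 2},
//       at::TensorOptions().layout(at::kSparseCsr));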

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_size, name, "aten::sparse_coo_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_size, overload_name, "size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_size, schema_str, "sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_coo_tensor_size::schema> create_sparse_coo_tensor_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_coo_tensor_size::name, sparse_coo_tensor_size::overload_name)
      .typed<sparse_coo_tensor_size::schema>();
}

// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_coo_tensor_size::call(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_coo_tensor_size_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_coo_tensor_size::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_coo_tensor_size_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_indices, name, "aten::sparse_coo_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_indices, overload_name, "indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_indices, schema_str, "sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_coo_tensor_indices::schema> create_sparse_coo_tensor_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_coo_tensor_indices::name, sparse_coo_tensor_indices::overload_name)
      .typed<sparse_coo_tensor_indices::schema>();
}

// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor sparse_coo_tensor_indices::call(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_coo_tensor_indices_typed_handle();
    return op.call(indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor sparse_coo_tensor_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_coo_tensor_indices_typed_handle();
    return op.redispatch(dispatchKeySet, indices, values, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_indices_size, name, "aten::sparse_coo_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_indices_size, overload_name, "indices_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_indices_size, schema_str, "sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_coo_tensor_indices_size::schema> create_sparse_coo_tensor_indices_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_coo_tensor_indices_size::name, sparse_coo_tensor_indices_size::overload_name)
      .typed<sparse_coo_tensor_indices_size::schema>();
}

// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor sparse_coo_tensor_indices_size::call(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_coo_tensor_indices_size_typed_handle();
    return op.call(indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor sparse_coo_tensor_indices_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_coo_tensor_indices_size_typed_handle();
    return op.redispatch(dispatchKeySet, indices, values, size, dtype, layout, device, pin_memory);
}
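
// Illustrative usage (hand-added; not generated). The three overloads above
// back the usual COO factory calls:
//
//   at::Tensor i = at::tensor({0, 1, 1, 0}, at::kLong).reshape({2, 2});
//   at::Tensor v = at::tensor({3.0, 4.0});
//   at::Tensor s = at::sparse_coo_tensor(i, v, {2, 2});        // indices_size
//   at::Tensor u = at::sparse_coo_tensor(i, v);                // indices (size inferred)
//   at::Tensor e = at::sparse_coo_tensor({2, 2}, at::kFloat);  // size only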

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_compressed_tensor_args, name, "aten::_validate_sparse_compressed_tensor_args")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_compressed_tensor_args, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_compressed_tensor_args, schema_str, "_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()")

// aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_compressed_tensor_args::schema> create__validate_sparse_compressed_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_compressed_tensor_args::name, _validate_sparse_compressed_tensor_args::overload_name)
      .typed<_validate_sparse_compressed_tensor_args::schema>();
}

// aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
void _validate_sparse_compressed_tensor_args::call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {

    static auto op = create__validate_sparse_compressed_tensor_args_typed_handle();
    return op.call(compressed_indices, plain_indices, values, size, layout);
}

// aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
void _validate_sparse_compressed_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {

    static auto op = create__validate_sparse_compressed_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, layout);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear_, name, "aten::sparse_resize_and_clear_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear_, schema_str, "sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)")

// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_and_clear_::schema> create_sparse_resize_and_clear__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_and_clear_::name, sparse_resize_and_clear_::overload_name)
      .typed<sparse_resize_and_clear_::schema>();
}

// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
const at::Tensor & sparse_resize_and_clear_::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {

    static auto op = create_sparse_resize_and_clear__typed_handle();
    return op.call(self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
const at::Tensor & sparse_resize_and_clear_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {

    static auto op = create_sparse_resize_and_clear__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dense, name, "aten::to_dense")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dense, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dense, schema_str, "to_dense(Tensor self, ScalarType? dtype=None) -> Tensor")

// aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_dense::schema> create_to_dense_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_dense::name, to_dense::overload_name)
      .typed<to_dense::schema>();
}

// aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
at::Tensor to_dense::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {

    static auto op = create_to_dense_typed_handle();
    return op.call(self, dtype);
}

// aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
at::Tensor to_dense::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype) {

    static auto op = create_to_dense_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}
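
// Illustrative usage (hand-added; not generated):
//
//   // assuming `sp` is any sparse (COO or compressed) tensor:
//   at::Tensor dense = sp.to_dense();   // lowers to to_dense::call above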

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_dim, name, "aten::sparse_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_dim, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_dim, schema_str, "sparse_dim(Tensor self) -> int")

// aten::sparse_dim(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<sparse_dim::schema> create_sparse_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_dim::name, sparse_dim::overload_name)
      .typed<sparse_dim::schema>();
}

// aten::sparse_dim(Tensor self) -> int
int64_t sparse_dim::call(const at::Tensor & self) {

    static auto op = create_sparse_dim_typed_handle();
    return op.call(self);
}

// aten::sparse_dim(Tensor self) -> int
int64_t sparse_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_sparse_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dimI, name, "aten::_dimI")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dimI, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dimI, schema_str, "_dimI(Tensor self) -> int")

// aten::_dimI(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_dimI::schema> create__dimI_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dimI::name, _dimI::overload_name)
      .typed<_dimI::schema>();
}

// aten::_dimI(Tensor self) -> int
int64_t _dimI::call(const at::Tensor & self) {

    static auto op = create__dimI_typed_handle();
    return op.call(self);
}

// aten::_dimI(Tensor self) -> int
int64_t _dimI::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__dimI_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnz, name, "aten::_nnz")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnz, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnz, schema_str, "_nnz(Tensor self) -> int")

// aten::_nnz(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_nnz::schema> create__nnz_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nnz::name, _nnz::overload_name)
      .typed<_nnz::schema>();
}

// aten::_nnz(Tensor self) -> int
int64_t _nnz::call(const at::Tensor & self) {

    static auto op = create__nnz_typed_handle();
    return op.call(self);
}

// aten::_nnz(Tensor self) -> int
int64_t _nnz::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__nnz_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices, name, "aten::ccol_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices, schema_str, "ccol_indices(Tensor(a) self) -> Tensor(a)")

// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<ccol_indices::schema> create_ccol_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ccol_indices::name, ccol_indices::overload_name)
      .typed<ccol_indices::schema>();
}

// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
at::Tensor ccol_indices::call(const at::Tensor & self) {

    static auto op = create_ccol_indices_typed_handle();
    return op.call(self);
}

// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
at::Tensor ccol_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_ccol_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csr, name, "aten::to_sparse_csr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csr, schema_str, "to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor")

// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_csr::schema> create_to_sparse_csr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_csr::name, to_sparse_csr::overload_name)
      .typed<to_sparse_csr::schema>();
}

// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
at::Tensor to_sparse_csr::call(const at::Tensor & self, c10::optional<int64_t> dense_dim) {

    static auto op = create_to_sparse_csr_typed_handle();
    return op.call(self, dense_dim);
}

// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
at::Tensor to_sparse_csr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim) {

    static auto op = create_to_sparse_csr_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_bsr, name, "aten::to_sparse_bsr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_bsr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_bsr, schema_str, "to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor")

// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_bsr::schema> create_to_sparse_bsr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_bsr::name, to_sparse_bsr::overload_name)
      .typed<to_sparse_bsr::schema>();
}

// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
at::Tensor to_sparse_bsr::call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {

    static auto op = create_to_sparse_bsr_typed_handle();
    return op.call(self, blocksize, dense_dim);
}

// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
at::Tensor to_sparse_bsr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {

    static auto op = create_to_sparse_bsr_typed_handle();
    return op.redispatch(dispatchKeySet, self, blocksize, dense_dim);
}
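
// Illustrative usage (hand-added; not generated). A sketch of the layout
// converters; older builds may require going through to_sparse_csr() before
// to_sparse_bsr():
//
//   at::Tensor m = at::eye(4);
//   at::Tensor csr = m.to_sparse_csr();         // dense_dim left as None
//   at::Tensor bsr = csr.to_sparse_bsr({2, 2}); // 2x2 blocks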

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv3d_weight, name, "aten::mkldnn_reorder_conv3d_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv3d_weight, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv3d_weight, schema_str, "mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor")

// aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_reorder_conv3d_weight::schema> create_mkldnn_reorder_conv3d_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_reorder_conv3d_weight::name, mkldnn_reorder_conv3d_weight::overload_name)
      .typed<mkldnn_reorder_conv3d_weight::schema>();
}

// aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor
at::Tensor mkldnn_reorder_conv3d_weight::call(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_mkldnn_reorder_conv3d_weight_typed_handle();
    return op.call(self, padding, stride, dilation, groups);
}

// aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor
at::Tensor mkldnn_reorder_conv3d_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_mkldnn_reorder_conv3d_weight_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, stride, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_scale, name, "aten::q_scale")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_scale, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_scale, schema_str, "q_scale(Tensor self) -> float")

// aten::q_scale(Tensor self) -> float
static C10_NOINLINE c10::TypedOperatorHandle<q_scale::schema> create_q_scale_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_scale::name, q_scale::overload_name)
      .typed<q_scale::schema>();
}

// aten::q_scale(Tensor self) -> float
double q_scale::call(const at::Tensor & self) {

    static auto op = create_q_scale_typed_handle();
    return op.call(self);
}

// aten::q_scale(Tensor self) -> float
double q_scale::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_q_scale_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_axis, name, "aten::q_per_channel_axis")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_axis, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_axis, schema_str, "q_per_channel_axis(Tensor self) -> int")

// aten::q_per_channel_axis(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_axis::schema> create_q_per_channel_axis_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_axis::name, q_per_channel_axis::overload_name)
      .typed<q_per_channel_axis::schema>();
}

// aten::q_per_channel_axis(Tensor self) -> int
int64_t q_per_channel_axis::call(const at::Tensor & self) {

    static auto op = create_q_per_channel_axis_typed_handle();
    return op.call(self);
}

// aten::q_per_channel_axis(Tensor self) -> int
int64_t q_per_channel_axis::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_q_per_channel_axis_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_tensor_quantized_tensor, name, "aten::_make_per_tensor_quantized_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_tensor_quantized_tensor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_tensor_quantized_tensor, schema_str, "_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor")

// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_make_per_tensor_quantized_tensor::schema> create__make_per_tensor_quantized_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_per_tensor_quantized_tensor::name, _make_per_tensor_quantized_tensor::overload_name)
      .typed<_make_per_tensor_quantized_tensor::schema>();
}

// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
at::Tensor _make_per_tensor_quantized_tensor::call(const at::Tensor & self, double scale, int64_t zero_point) {

    static auto op = create__make_per_tensor_quantized_tensor_typed_handle();
    return op.call(self, scale, zero_point);
}

// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
at::Tensor _make_per_tensor_quantized_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point) {

    static auto op = create__make_per_tensor_quantized_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point);
}
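
// Illustrative usage (hand-added; not generated). _make_per_tensor_quantized_tensor
// is an internal helper that wraps pre-quantized integer data in a quantized
// tensor without requantizing; q_scale/q_per_channel_axis read the quantizer
// parameters back out:
//
//   at::Tensor raw = at::randint(0, 256, {4}, at::kByte);
//   at::Tensor q = at::_make_per_tensor_quantized_tensor(
//       raw, /*scale=*/0.1, /*zero_point=*/128);
//   double s = q.q_scale();   // 0.1, via q_scale::call above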
5744
5745STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_channel_quantized_tensor, name, "aten::_make_per_channel_quantized_tensor")
5746STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_channel_quantized_tensor, overload_name, "")
5747STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_channel_quantized_tensor, schema_str, "_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor")
5748
5749// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
5750static C10_NOINLINE c10::TypedOperatorHandle<_make_per_channel_quantized_tensor::schema> create__make_per_channel_quantized_tensor_typed_handle() {
5751 return c10::Dispatcher::singleton()
5752 .findSchemaOrThrow(_make_per_channel_quantized_tensor::name, _make_per_channel_quantized_tensor::overload_name)
5753 .typed<_make_per_channel_quantized_tensor::schema>();
5754}
5755
5756// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
5757at::Tensor _make_per_channel_quantized_tensor::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
5758
5759 static auto op = create__make_per_channel_quantized_tensor_typed_handle();
5760 return op.call(self, scale, zero_point, axis);
5761}
5762
5763// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
5764at::Tensor _make_per_channel_quantized_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
5765
5766 static auto op = create__make_per_channel_quantized_tensor_typed_handle();
5767 return op.redispatch(dispatchKeySet, self, scale, zero_point, axis);
5768}
5769
5770STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine_cachemask_backward, name, "aten::fake_quantize_per_tensor_affine_cachemask_backward")
5771STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine_cachemask_backward, overload_name, "")
5772STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine_cachemask_backward, schema_str, "fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor")
5773
5774// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
5775static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine_cachemask_backward::schema> create_fake_quantize_per_tensor_affine_cachemask_backward_typed_handle() {
5776 return c10::Dispatcher::singleton()
5777 .findSchemaOrThrow(fake_quantize_per_tensor_affine_cachemask_backward::name, fake_quantize_per_tensor_affine_cachemask_backward::overload_name)
5778 .typed<fake_quantize_per_tensor_affine_cachemask_backward::schema>();
5779}
5780
5781// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
5782at::Tensor fake_quantize_per_tensor_affine_cachemask_backward::call(const at::Tensor & grad, const at::Tensor & mask) {
5783
5784 static auto op = create_fake_quantize_per_tensor_affine_cachemask_backward_typed_handle();
5785 return op.call(grad, mask);
5786}
5787
5788// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
5789at::Tensor fake_quantize_per_tensor_affine_cachemask_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask) {
5790
5791 static auto op = create_fake_quantize_per_tensor_affine_cachemask_backward_typed_handle();
5792 return op.redispatch(dispatchKeySet, grad, mask);
5793}
5794
5795STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask_backward, name, "aten::fake_quantize_per_channel_affine_cachemask_backward")
5796STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask_backward, overload_name, "")
5797STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask_backward, schema_str, "fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor")
5798
5799// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
5800static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_channel_affine_cachemask_backward::schema> create_fake_quantize_per_channel_affine_cachemask_backward_typed_handle() {
5801 return c10::Dispatcher::singleton()
5802 .findSchemaOrThrow(fake_quantize_per_channel_affine_cachemask_backward::name, fake_quantize_per_channel_affine_cachemask_backward::overload_name)
5803 .typed<fake_quantize_per_channel_affine_cachemask_backward::schema>();
5804}
5805
5806// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
5807at::Tensor fake_quantize_per_channel_affine_cachemask_backward::call(const at::Tensor & grad, const at::Tensor & mask) {
5808
5809 static auto op = create_fake_quantize_per_channel_affine_cachemask_backward_typed_handle();
5810 return op.call(grad, mask);
5811}
5812
5813// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
5814at::Tensor fake_quantize_per_channel_affine_cachemask_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask) {
5815
5816 static auto op = create_fake_quantize_per_channel_affine_cachemask_backward_typed_handle();
5817 return op.redispatch(dispatchKeySet, grad, mask);
5818}
5819
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_saturate_weight_to_fp16, name, "aten::_saturate_weight_to_fp16")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_saturate_weight_to_fp16, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_saturate_weight_to_fp16, schema_str, "_saturate_weight_to_fp16(Tensor weight) -> Tensor")

// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_saturate_weight_to_fp16::schema> create__saturate_weight_to_fp16_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_saturate_weight_to_fp16::name, _saturate_weight_to_fp16::overload_name)
      .typed<_saturate_weight_to_fp16::schema>();
}

// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor
at::Tensor _saturate_weight_to_fp16::call(const at::Tensor & weight) {
    static auto op = create__saturate_weight_to_fp16_typed_handle();
    return op.call(weight);
}

// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor
at::Tensor _saturate_weight_to_fp16::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight) {
    static auto op = create__saturate_weight_to_fp16_typed_handle();
    return op.redispatch(dispatchKeySet, weight);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_autocast_to_reduced_precision, name, "aten::_autocast_to_reduced_precision")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_autocast_to_reduced_precision, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_autocast_to_reduced_precision, schema_str, "_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)")

// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_autocast_to_reduced_precision::schema> create__autocast_to_reduced_precision_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_autocast_to_reduced_precision::name, _autocast_to_reduced_precision::overload_name)
      .typed<_autocast_to_reduced_precision::schema>();
}

// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
at::Tensor _autocast_to_reduced_precision::call(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
    static auto op = create__autocast_to_reduced_precision_typed_handle();
    return op.call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
}

// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
at::Tensor _autocast_to_reduced_precision::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
    static auto op = create__autocast_to_reduced_precision_typed_handle();
    return op.redispatch(dispatchKeySet, self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
}

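// Illustrative usage (an assumption, not generated output): this operator is
// the entry point behind Tensor::_autocast_to_reduced_precision, which
// autocast uses to downcast eligible floating-point tensors per device:
//
//   at::Tensor t = at::rand({2, 2});  // float32
//   at::Tensor r = t._autocast_to_reduced_precision(
//       /*cuda_enabled=*/true, /*cpu_enabled=*/true,
//       /*cuda_dtype=*/at::kHalf, /*cpu_dtype=*/at::kBFloat16);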
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Tensor, name, "aten::result_type")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Tensor, schema_str, "result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType")

// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
static C10_NOINLINE c10::TypedOperatorHandle<result_type_Tensor::schema> create_result_type_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(result_type_Tensor::name, result_type_Tensor::overload_name)
      .typed<result_type_Tensor::schema>();
}

// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
at::ScalarType result_type_Tensor::call(const at::Tensor & tensor, const at::Tensor & other) {
    static auto op = create_result_type_Tensor_typed_handle();
    return op.call(tensor, other);
}

// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
at::ScalarType result_type_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Tensor & other) {
    static auto op = create_result_type_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, tensor, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar, name, "aten::result_type")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar, schema_str, "result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType")

// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
static C10_NOINLINE c10::TypedOperatorHandle<result_type_Scalar::schema> create_result_type_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(result_type_Scalar::name, result_type_Scalar::overload_name)
      .typed<result_type_Scalar::schema>();
}

// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
at::ScalarType result_type_Scalar::call(const at::Tensor & tensor, const at::Scalar & other) {
    static auto op = create_result_type_Scalar_typed_handle();
    return op.call(tensor, other);
}

// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
at::ScalarType result_type_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Scalar & other) {
    static auto op = create_result_type_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, tensor, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar_Tensor, name, "aten::result_type")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar_Tensor, overload_name, "Scalar_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar_Tensor, schema_str, "result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType")

// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
static C10_NOINLINE c10::TypedOperatorHandle<result_type_Scalar_Tensor::schema> create_result_type_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(result_type_Scalar_Tensor::name, result_type_Scalar_Tensor::overload_name)
      .typed<result_type_Scalar_Tensor::schema>();
}

// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
at::ScalarType result_type_Scalar_Tensor::call(const at::Scalar & scalar, const at::Tensor & tensor) {
    static auto op = create_result_type_Scalar_Tensor_typed_handle();
    return op.call(scalar, tensor);
}

// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
at::ScalarType result_type_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar, const at::Tensor & tensor) {
    static auto op = create_result_type_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, scalar, tensor);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar_Scalar, name, "aten::result_type")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar_Scalar, overload_name, "Scalar_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(result_type_Scalar_Scalar, schema_str, "result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType")

// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
static C10_NOINLINE c10::TypedOperatorHandle<result_type_Scalar_Scalar::schema> create_result_type_Scalar_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(result_type_Scalar_Scalar::name, result_type_Scalar_Scalar::overload_name)
      .typed<result_type_Scalar_Scalar::schema>();
}

// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
at::ScalarType result_type_Scalar_Scalar::call(const at::Scalar & scalar1, const at::Scalar & scalar2) {
    static auto op = create_result_type_Scalar_Scalar_typed_handle();
    return op.call(scalar1, scalar2);
}

// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
at::ScalarType result_type_Scalar_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar1, const at::Scalar & scalar2) {
    static auto op = create_result_type_Scalar_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, scalar1, scalar2);
}

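// Illustrative sketch (hand-written, not generated): the four overloads above
// expose type promotion for every tensor/scalar pairing, e.g.
//
//   at::Tensor f = at::ones({2}, at::kFloat);
//   at::Tensor i = at::ones({2}, at::kInt);
//   at::ScalarType a = at::result_type(f, i);    // kFloat
//   at::ScalarType b = at::result_type(f, 2.0);  // still kFloat: a double
//                                                // Scalar does not upcast a
//                                                // float tensor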
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward, name, "aten::_thnn_fused_lstm_cell_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward, schema_str, "_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")

// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell_backward::schema> create__thnn_fused_lstm_cell_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_thnn_fused_lstm_cell_backward::name, _thnn_fused_lstm_cell_backward::overload_name)
      .typed<_thnn_fused_lstm_cell_backward::schema>();
}

// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward::call(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    static auto op = create__thnn_fused_lstm_cell_backward_typed_handle();
    return op.call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}

// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    static auto op = create__thnn_fused_lstm_cell_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_cell, name, "aten::lstm_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_cell, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_cell, schema_str, "lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)")

// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<lstm_cell::schema> create_lstm_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lstm_cell::name, lstm_cell::overload_name)
      .typed<lstm_cell::schema>();
}

// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> lstm_cell::call(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    static auto op = create_lstm_cell_typed_handle();
    return op.call(input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> lstm_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    static auto op = create_lstm_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh);
}

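// Illustrative usage (hand-written, not generated): hx packs the hidden and
// cell state as a TensorList, and the result is the (h', c') pair:
//
//   // input: [batch, input_size]; hx, cx: [batch, hidden_size]
//   auto out = at::lstm_cell(input, {hx, cx}, w_ih, w_hh, b_ih, b_hh);
//   at::Tensor h_next = std::get<0>(out);
//   at::Tensor c_next = std::get<1>(out);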
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_rnn_relu_cell, name, "aten::quantized_rnn_relu_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_rnn_relu_cell, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_rnn_relu_cell, schema_str, "quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor")

// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_rnn_relu_cell::schema> create_quantized_rnn_relu_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_rnn_relu_cell::name, quantized_rnn_relu_cell::overload_name)
      .typed<quantized_rnn_relu_cell::schema>();
}

// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_rnn_relu_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    static auto op = create_quantized_rnn_relu_cell_typed_handle();
    return op.call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_rnn_relu_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    static auto op = create_quantized_rnn_relu_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill__Scalar, name, "aten::masked_fill_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill__Scalar, schema_str, "masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)")

// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill__Scalar::schema> create_masked_fill__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_fill__Scalar::name, masked_fill__Scalar::overload_name)
      .typed<masked_fill__Scalar::schema>();
}

// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
at::Tensor & masked_fill__Scalar::call(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
    static auto op = create_masked_fill__Scalar_typed_handle();
    return op.call(self, mask, value);
}

// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
at::Tensor & masked_fill__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
    static auto op = create_masked_fill__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Scalar, name, "aten::masked_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Scalar, schema_str, "masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor")

// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill_Scalar::schema> create_masked_fill_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_fill_Scalar::name, masked_fill_Scalar::overload_name)
      .typed<masked_fill_Scalar::schema>();
}

// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
at::Tensor masked_fill_Scalar::call(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
    static auto op = create_masked_fill_Scalar_typed_handle();
    return op.call(self, mask, value);
}

// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
at::Tensor masked_fill_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
    static auto op = create_masked_fill_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill__Tensor, name, "aten::masked_fill_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill__Tensor, schema_str, "masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)")

// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill__Tensor::schema> create_masked_fill__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_fill__Tensor::name, masked_fill__Tensor::overload_name)
      .typed<masked_fill__Tensor::schema>();
}

// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
at::Tensor & masked_fill__Tensor::call(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
    static auto op = create_masked_fill__Tensor_typed_handle();
    return op.call(self, mask, value);
}

// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
at::Tensor & masked_fill__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
    static auto op = create_masked_fill__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Tensor, name, "aten::masked_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Tensor, schema_str, "masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor")

// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill_Tensor::schema> create_masked_fill_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_fill_Tensor::name, masked_fill_Tensor::overload_name)
      .typed<masked_fill_Tensor::schema>();
}

// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
at::Tensor masked_fill_Tensor::call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
    static auto op = create_masked_fill_Tensor_typed_handle();
    return op.call(self, mask, value);
}

// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
at::Tensor masked_fill_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
    static auto op = create_masked_fill_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, value);
}

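// Illustrative usage (hand-written, not generated): the Scalar/Tensor pairs
// above are the in-place and out-of-place variants of the same fill:
//
//   at::Tensor t = at::rand({4});
//   at::Tensor mask = t.gt(0.5);
//   t.masked_fill_(mask, 0.0);              // in-place -> masked_fill__Scalar
//   at::Tensor u = t.masked_fill(mask, 1);  // out-of-place -> masked_fill_Scalar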
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter_, name, "aten::masked_scatter_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter_, schema_str, "masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)")

// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_scatter_::schema> create_masked_scatter__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_scatter_::name, masked_scatter_::overload_name)
      .typed<masked_scatter_::schema>();
}

// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
at::Tensor & masked_scatter_::call(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
    static auto op = create_masked_scatter__typed_handle();
    return op.call(self, mask, source);
}

// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
at::Tensor & masked_scatter_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
    static auto op = create_masked_scatter__typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, source);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter, name, "aten::masked_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter, schema_str, "masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor")

// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<masked_scatter::schema> create_masked_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_scatter::name, masked_scatter::overload_name)
      .typed<masked_scatter::schema>();
}

// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
at::Tensor masked_scatter::call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
    static auto op = create_masked_scatter_typed_handle();
    return op.call(self, mask, source);
}

// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
at::Tensor masked_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
    static auto op = create_masked_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, source);
}

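// Illustrative usage (hand-written, not generated): source is consumed
// element by element wherever mask is true, so it needs at least mask.sum()
// elements:
//
//   at::Tensor t = at::zeros({4});
//   at::Tensor mask = at::tensor({1, 0, 1, 0}, at::kBool);
//   at::Tensor src = at::tensor({1.0f, 2.0f});
//   t.masked_scatter_(mask, src);  // t becomes [1, 0, 2, 0]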
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_backward, name, "aten::_masked_softmax_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_backward, schema_str, "_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor")

// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_masked_softmax_backward::schema> create__masked_softmax_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_masked_softmax_backward::name, _masked_softmax_backward::overload_name)
      .typed<_masked_softmax_backward::schema>();
}

// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
at::Tensor _masked_softmax_backward::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
    static auto op = create__masked_softmax_backward_typed_handle();
    return op.call(grad_output, output, mask, dim);
}

// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
at::Tensor _masked_softmax_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
    static auto op = create__masked_softmax_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, mask, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_out, name, "aten::index_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_out, schema_str, "index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_add_out::schema> create_index_add_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_add_out::name, index_add_out::overload_name)
      .typed<index_add_out::schema>();
}

// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_add_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
    static auto op = create_index_add_out_typed_handle();
    return op.call(self, dim, index, source, alpha, out);
}

// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_add_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
    static auto op = create_index_add_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_, name, "aten::index_add_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_, schema_str, "index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)")

// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_add_::schema> create_index_add__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_add_::name, index_add_::overload_name)
      .typed<index_add_::schema>();
}

// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & index_add_::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
    static auto op = create_index_add__typed_handle();
    return op.call(self, dim, index, source, alpha);
}

// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & index_add_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
    static auto op = create_index_add__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add, name, "aten::index_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add, schema_str, "index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor")

// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_add::schema> create_index_add_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_add::name, index_add::overload_name)
      .typed<index_add::schema>();
}

// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
at::Tensor index_add::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
    static auto op = create_index_add_typed_handle();
    return op.call(self, dim, index, source, alpha);
}

// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
at::Tensor index_add::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
    static auto op = create_index_add_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_dimname, name, "aten::index_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_add_dimname, schema_str, "index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor")

// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_add_dimname::schema> create_index_add_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_add_dimname::name, index_add_dimname::overload_name)
      .typed<index_add_dimname::schema>();
}

// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
at::Tensor index_add_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
    static auto op = create_index_add_dimname_typed_handle();
    return op.call(self, dim, index, source, alpha);
}

// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
at::Tensor index_add_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
    static auto op = create_index_add_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, alpha);
}

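// Illustrative usage (hand-written, not generated): slices of source along
// dim are scaled by alpha and accumulated into self at the positions named by
// index:
//
//   at::Tensor t = at::zeros({3, 2});
//   at::Tensor idx = at::tensor({0, 2}, at::kLong);
//   at::Tensor src = at::ones({2, 2});
//   t.index_add_(0, idx, src, /*alpha=*/2.0);  // rows 0 and 2 become [2, 2]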
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Tensor_out, name, "aten::bitwise_or")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Tensor_out, schema_str, "bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Tensor_out::schema> create_bitwise_or_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_or_Tensor_out::name, bitwise_or_Tensor_out::overload_name)
      .typed<bitwise_or_Tensor_out::schema>();
}

// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_bitwise_or_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_bitwise_or_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_out, name, "aten::bitwise_or")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_out, schema_str, "bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Scalar_out::schema> create_bitwise_or_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_or_Scalar_out::name, bitwise_or_Scalar_out::overload_name)
      .typed<bitwise_or_Scalar_out::schema>();
}

// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto op = create_bitwise_or_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto op = create_bitwise_or_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar, name, "aten::bitwise_or")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar, schema_str, "bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Scalar::schema> create_bitwise_or_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_or_Scalar::name, bitwise_or_Scalar::overload_name)
      .typed<bitwise_or_Scalar::schema>();
}

// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_or_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto op = create_bitwise_or_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_or_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto op = create_bitwise_or_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_Tensor, name, "aten::bitwise_or")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_Tensor, overload_name, "Scalar_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_Tensor, schema_str, "bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")

// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Scalar_Tensor::schema> create_bitwise_or_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_or_Scalar_Tensor::name, bitwise_or_Scalar_Tensor::overload_name)
      .typed<bitwise_or_Scalar_Tensor::schema>();
}

// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_or_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
    static auto op = create_bitwise_or_Scalar_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_or_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    static auto op = create_bitwise_or_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Tensor, name, "aten::bitwise_or")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Tensor, schema_str, "bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Tensor::schema> create_bitwise_or_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_or_Tensor::name, bitwise_or_Tensor::overload_name)
      .typed<bitwise_or_Tensor::schema>();
}

// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_or_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_bitwise_or_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_or_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_bitwise_or_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or__Scalar, name, "aten::bitwise_or_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or__Scalar, schema_str, "bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or__Scalar::schema> create_bitwise_or__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_or__Scalar::name, bitwise_or__Scalar::overload_name)
      .typed<bitwise_or__Scalar::schema>();
}

// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_or__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto op = create_bitwise_or__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_or__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto op = create_bitwise_or__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or__Tensor, name, "aten::bitwise_or_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or__Tensor, schema_str, "bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or__Tensor::schema> create_bitwise_or__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_or__Tensor::name, bitwise_or__Tensor::overload_name)
      .typed<bitwise_or__Tensor::schema>();
}

// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_or__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto op = create_bitwise_or__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_or__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto op = create_bitwise_or__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

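// Illustrative usage (hand-written, not generated): these overloads back both
// the function form and operator| on integral/bool tensors:
//
//   at::Tensor a = at::tensor({0b0011, 0b0101});
//   at::Tensor b = at::tensor({0b0110, 0b0110});
//   at::Tensor c = at::bitwise_or(a, b);  // same as a | b -> {0b0111, 0b0111}
//   a.bitwise_or_(1);                     // in-place Scalar variant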
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_out, name, "aten::diag")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_out, schema_str, "diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diag_out::schema> create_diag_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diag_out::name, diag_out::overload_name)
      .typed<diag_out::schema>();
}

// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diag_out::call(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    static auto op = create_diag_out_typed_handle();
    return op.call(self, diagonal, out);
}

// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diag_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    static auto op = create_diag_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag, name, "aten::diag")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag, schema_str, "diag(Tensor self, int diagonal=0) -> Tensor")

// aten::diag(Tensor self, int diagonal=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diag::schema> create_diag_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diag::name, diag::overload_name)
      .typed<diag::schema>();
}

// aten::diag(Tensor self, int diagonal=0) -> Tensor
at::Tensor diag::call(const at::Tensor & self, int64_t diagonal) {
    static auto op = create_diag_typed_handle();
    return op.call(self, diagonal);
}

// aten::diag(Tensor self, int diagonal=0) -> Tensor
at::Tensor diag::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal) {
    static auto op = create_diag_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal);
}

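// Illustrative usage (hand-written, not generated): diag is polymorphic over
// rank, so a 1-D input builds a matrix and a 2-D input extracts a diagonal:
//
//   at::Tensor v = at::tensor({1, 2, 3});
//   at::Tensor m = at::diag(v);     // 3x3 matrix with v on the main diagonal
//   at::Tensor d = at::diag(m, 1);  // the diagonal one above the main one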
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_indices, name, "aten::triu_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_indices, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_indices, schema_str, "triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<triu_indices::schema> create_triu_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu_indices::name, triu_indices::overload_name)
      .typed<triu_indices::schema>();
}

// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor triu_indices::call(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_triu_indices_typed_handle();
    return op.call(row, col, offset, dtype, layout, device, pin_memory);
}

// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor triu_indices::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_triu_indices_typed_handle();
    return op.redispatch(dispatchKeySet, row, col, offset, dtype, layout, device, pin_memory);
}

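// Illustrative usage (hand-written, not generated): the result is a [2, N]
// tensor of (row, col) coordinates of the upper triangle, and dtype defaults
// to long per the schema above:
//
//   at::Tensor idx = at::triu_indices(3, 3);                  // shape [2, 6]
//   at::Tensor strict = at::triu_indices(3, 3, /*offset=*/1); // above diagonal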
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace, name, "aten::trace")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace, schema_str, "trace(Tensor self) -> Tensor")

// aten::trace(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<trace::schema> create_trace_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(trace::name, trace::overload_name)
      .typed<trace::schema>();
}

// aten::trace(Tensor self) -> Tensor
at::Tensor trace::call(const at::Tensor & self) {
    static auto op = create_trace_typed_handle();
    return op.call(self);
}

// aten::trace(Tensor self) -> Tensor
at::Tensor trace::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_trace_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

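// Illustrative usage (hand-written, not generated): trace sums the main
// diagonal of a 2-D tensor:
//
//   at::Tensor m = at::eye(3);
//   at::Tensor s = at::trace(m);  // 0-dim tensor holding 3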
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Scalar_out, name, "aten::greater_equal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Scalar_out, schema_str, "greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal_Scalar_out::schema> create_greater_equal_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal_Scalar_out::name, greater_equal_Scalar_out::overload_name)
      .typed<greater_equal_Scalar_out::schema>();
}

// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_equal_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto op = create_greater_equal_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_equal_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto op = create_greater_equal_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Scalar, name, "aten::greater_equal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Scalar, schema_str, "greater_equal.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal_Scalar::schema> create_greater_equal_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal_Scalar::name, greater_equal_Scalar::overload_name)
      .typed<greater_equal_Scalar::schema>();
}

// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor greater_equal_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto op = create_greater_equal_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor greater_equal_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto op = create_greater_equal_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Tensor_out, name, "aten::greater_equal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Tensor_out, schema_str, "greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal_Tensor_out::schema> create_greater_equal_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal_Tensor_out::name, greater_equal_Tensor_out::overload_name)
      .typed<greater_equal_Tensor_out::schema>();
}

// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_equal_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_greater_equal_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_equal_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_greater_equal_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Tensor, name, "aten::greater_equal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal_Tensor, schema_str, "greater_equal.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal_Tensor::schema> create_greater_equal_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal_Tensor::name, greater_equal_Tensor::overload_name)
      .typed<greater_equal_Tensor::schema>();
}

// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor greater_equal_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_greater_equal_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor greater_equal_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_greater_equal_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal__Scalar, name, "aten::greater_equal_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal__Scalar, schema_str, "greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal__Scalar::schema> create_greater_equal__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal__Scalar::name, greater_equal__Scalar::overload_name)
      .typed<greater_equal__Scalar::schema>();
}

// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & greater_equal__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto op = create_greater_equal__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & greater_equal__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto op = create_greater_equal__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal__Tensor, name, "aten::greater_equal_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_equal__Tensor, schema_str, "greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal__Tensor::schema> create_greater_equal__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal__Tensor::name, greater_equal__Tensor::overload_name)
      .typed<greater_equal__Tensor::schema>();
}

// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & greater_equal__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto op = create_greater_equal__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & greater_equal__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto op = create_greater_equal__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

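// Illustrative note (hand-written, not generated): greater_equal is the
// NumPy-style alias for at::ge; every variant produces (or fills) a bool
// tensor:
//
//   at::Tensor a = at::arange(4);
//   at::Tensor m = at::greater_equal(a, 2);  // [false, false, true, true]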
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_out, name, "aten::take")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_out, schema_str, "take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)")

// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<take_out::schema> create_take_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(take_out::name, take_out::overload_name)
      .typed<take_out::schema>();
}

// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & take_out::call(const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
    static auto op = create_take_out_typed_handle();
    return op.call(self, index, out);
}

// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & take_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
    static auto op = create_take_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, index, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take, name, "aten::take")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take, schema_str, "take(Tensor self, Tensor index) -> Tensor")

// aten::take(Tensor self, Tensor index) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<take::schema> create_take_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(take::name, take::overload_name)
      .typed<take::schema>();
}

// aten::take(Tensor self, Tensor index) -> Tensor
at::Tensor take::call(const at::Tensor & self, const at::Tensor & index) {
    static auto op = create_take_typed_handle();
    return op.call(self, index);
}

// aten::take(Tensor self, Tensor index) -> Tensor
at::Tensor take::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index) {
    static auto op = create_take_typed_handle();
    return op.redispatch(dispatchKeySet, self, index);
}

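// Illustrative usage (hand-written, not generated): take indexes self as if
// it were flattened to 1-D, with a long index tensor:
//
//   at::Tensor m = at::arange(6).reshape({2, 3});              // [[0,1,2],[3,4,5]]
//   at::Tensor r = at::take(m, at::tensor({0, 4}, at::kLong)); // [0, 4]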
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_backward, name, "aten::index_select_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_backward, schema_str, "index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor")

// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_select_backward::schema> create_index_select_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select_backward::name, index_select_backward::overload_name)
      .typed<index_select_backward::schema>();
}

// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
at::Tensor index_select_backward::call(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
    
    static auto op = create_index_select_backward_typed_handle();
    return op.call(grad, self_sizes, dim, index);
}

// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
at::Tensor index_select_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
    
    static auto op = create_index_select_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self_sizes, dim, index);
}

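// A rough sketch of what this backward helper computes (conceptual, not the
// exact kernel): the incoming grad is scattered back along `dim` into a zero
// tensor shaped like the forward input; `sizes` is a hypothetical stand-in
// for self_sizes:
//
//   at::Tensor self_grad =
//       at::zeros(sizes, grad.options()).index_add(dim, index, grad);
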
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argwhere, name, "aten::argwhere")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argwhere, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argwhere, schema_str, "argwhere(Tensor self) -> Tensor")

// aten::argwhere(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<argwhere::schema> create_argwhere_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argwhere::name, argwhere::overload_name)
      .typed<argwhere::schema>();
}

// aten::argwhere(Tensor self) -> Tensor
at::Tensor argwhere::call(const at::Tensor & self) {
    
    static auto op = create_argwhere_typed_handle();
    return op.call(self);
}

// aten::argwhere(Tensor self) -> Tensor
at::Tensor argwhere::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_argwhere_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

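// Usage sketch for aten::argwhere (illustrative values): returns the indices
// of the non-zero elements, one row per element:
//
//   at::Tensor t = at::tensor({0, 3, 0, 7});
//   at::Tensor idx = at::argwhere(t);  // shape [2, 1]: {{1}, {3}}
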
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(svd_U, name, "aten::svd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(svd_U, overload_name, "U")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(svd_U, schema_str, "svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)")

// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
static C10_NOINLINE c10::TypedOperatorHandle<svd_U::schema> create_svd_U_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(svd_U::name, svd_U::overload_name)
      .typed<svd_U::schema>();
}

// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_U::call(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
    
    static auto op = create_svd_U_typed_handle();
    return op.call(self, some, compute_uv, U, S, V);
}

// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_U::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
    
    static auto op = create_svd_U_typed_handle();
    return op.redispatch(dispatchKeySet, self, some, compute_uv, U, S, V);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(svd, name, "aten::svd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(svd, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(svd, schema_str, "svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)")

// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
static C10_NOINLINE c10::TypedOperatorHandle<svd::schema> create_svd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(svd::name, svd::overload_name)
      .typed<svd::schema>();
}

// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd::call(const at::Tensor & self, bool some, bool compute_uv) {
    
    static auto op = create_svd_typed_handle();
    return op.call(self, some, compute_uv);
}

// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv) {
    
    static auto op = create_svd_typed_handle();
    return op.redispatch(dispatchKeySet, self, some, compute_uv);
}

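// Usage sketch for the legacy aten::svd op (illustrative; newer code generally
// prefers at::linalg_svd). Note it returns V, not its conjugate transpose, so
// A is reconstructed as U * diag(S) * V^T:
//
//   at::Tensor A = at::rand({4, 3});
//   auto [U, S, V] = at::svd(A);  // structured bindings over the returned tuple
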
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geqrf_a, name, "aten::geqrf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geqrf_a, overload_name, "a")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geqrf_a, schema_str, "geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)")

// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
static C10_NOINLINE c10::TypedOperatorHandle<geqrf_a::schema> create_geqrf_a_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(geqrf_a::name, geqrf_a::overload_name)
      .typed<geqrf_a::schema>();
}

// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
::std::tuple<at::Tensor &,at::Tensor &> geqrf_a::call(const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
    
    static auto op = create_geqrf_a_typed_handle();
    return op.call(self, a, tau);
}

// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
::std::tuple<at::Tensor &,at::Tensor &> geqrf_a::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
    
    static auto op = create_geqrf_a_typed_handle();
    return op.redispatch(dispatchKeySet, self, a, tau);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geqrf, name, "aten::geqrf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geqrf, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geqrf, schema_str, "geqrf(Tensor self) -> (Tensor a, Tensor tau)")

// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
static C10_NOINLINE c10::TypedOperatorHandle<geqrf::schema> create_geqrf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(geqrf::name, geqrf::overload_name)
      .typed<geqrf::schema>();
}

// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
::std::tuple<at::Tensor,at::Tensor> geqrf::call(const at::Tensor & self) {
    
    static auto op = create_geqrf_typed_handle();
    return op.call(self);
}

// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
::std::tuple<at::Tensor,at::Tensor> geqrf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_geqrf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

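// Usage sketch for aten::geqrf (illustrative). As with LAPACK's geqrf, the
// result packs R into the upper triangle of `a`, with the Householder
// reflectors stored below the diagonal and their scales in `tau`:
//
//   at::Tensor A = at::rand({5, 3});
//   auto [a, tau] = at::geqrf(A);
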
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(orgqr, name, "aten::orgqr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(orgqr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(orgqr, schema_str, "orgqr(Tensor self, Tensor input2) -> Tensor")

// aten::orgqr(Tensor self, Tensor input2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<orgqr::schema> create_orgqr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(orgqr::name, orgqr::overload_name)
      .typed<orgqr::schema>();
}

// aten::orgqr(Tensor self, Tensor input2) -> Tensor
at::Tensor orgqr::call(const at::Tensor & self, const at::Tensor & input2) {
    
    static auto op = create_orgqr_typed_handle();
    return op.call(self, input2);
}

// aten::orgqr(Tensor self, Tensor input2) -> Tensor
at::Tensor orgqr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2) {
    
    static auto op = create_orgqr_typed_handle();
    return op.redispatch(dispatchKeySet, self, input2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(orgqr_out, name, "aten::orgqr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(orgqr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(orgqr_out, schema_str, "orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)")

// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<orgqr_out::schema> create_orgqr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(orgqr_out::name, orgqr_out::overload_name)
      .typed<orgqr_out::schema>();
}

// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & orgqr_out::call(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
    
    static auto op = create_orgqr_out_typed_handle();
    return op.call(self, input2, out);
}

// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & orgqr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
    
    static auto op = create_orgqr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, input2, out);
}

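// Usage sketch for aten::orgqr (illustrative), which turns geqrf's packed
// output back into an explicit Q with orthonormal columns:
//
//   auto [a, tau] = at::geqrf(at::rand({5, 3}));
//   at::Tensor Q = at::orgqr(a, tau);  // Q.t().matmul(Q) is approximately identity
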
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv, name, "aten::erfinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv, schema_str, "erfinv(Tensor self) -> Tensor")

// aten::erfinv(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<erfinv::schema> create_erfinv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfinv::name, erfinv::overload_name)
      .typed<erfinv::schema>();
}

// aten::erfinv(Tensor self) -> Tensor
at::Tensor erfinv::call(const at::Tensor & self) {
    
    static auto op = create_erfinv_typed_handle();
    return op.call(self);
}

// aten::erfinv(Tensor self) -> Tensor
at::Tensor erfinv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_erfinv_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv_, name, "aten::erfinv_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv_, schema_str, "erfinv_(Tensor(a!) self) -> Tensor(a!)")

// aten::erfinv_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erfinv_::schema> create_erfinv__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfinv_::name, erfinv_::overload_name)
      .typed<erfinv_::schema>();
}

// aten::erfinv_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erfinv_::call(at::Tensor & self) {
    
    static auto op = create_erfinv__typed_handle();
    return op.call(self);
}

// aten::erfinv_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erfinv_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_erfinv__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv_out, name, "aten::erfinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erfinv_out, schema_str, "erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erfinv_out::schema> create_erfinv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfinv_out::name, erfinv_out::overload_name)
      .typed<erfinv_out::schema>();
}

// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erfinv_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_erfinv_out_typed_handle();
    return op.call(self, out);
}

// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erfinv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_erfinv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

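// Usage sketch for aten::erfinv (illustrative): the inverse error function,
// defined on (-1, 1), so erf(erfinv(x)) is approximately x:
//
//   at::Tensor x = at::linspace(-0.9, 0.9, 5);
//   at::Tensor y = at::erfinv(x);
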
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(signbit, name, "aten::signbit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(signbit, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(signbit, schema_str, "signbit(Tensor self) -> Tensor")

// aten::signbit(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<signbit::schema> create_signbit_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(signbit::name, signbit::overload_name)
      .typed<signbit::schema>();
}

// aten::signbit(Tensor self) -> Tensor
at::Tensor signbit::call(const at::Tensor & self) {
    
    static auto op = create_signbit_typed_handle();
    return op.call(self);
}

// aten::signbit(Tensor self) -> Tensor
at::Tensor signbit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_signbit_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(signbit_out, name, "aten::signbit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(signbit_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(signbit_out, schema_str, "signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<signbit_out::schema> create_signbit_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(signbit_out::name, signbit_out::overload_name)
      .typed<signbit_out::schema>();
}

// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & signbit_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_signbit_out_typed_handle();
    return op.call(self, out);
}

// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & signbit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_signbit_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

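// Usage sketch for aten::signbit (illustrative): returns a bool tensor that is
// true wherever the sign bit is set, including for -0.0:
//
//   at::Tensor t = at::tensor({-2.0f, 0.0f, 3.5f});
//   at::Tensor s = at::signbit(t);  // {true, false, false}
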
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dist, name, "aten::dist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dist, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dist, schema_str, "dist(Tensor self, Tensor other, Scalar p=2) -> Tensor")

// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<dist::schema> create_dist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dist::name, dist::overload_name)
      .typed<dist::schema>();
}

// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
at::Tensor dist::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
    
    static auto op = create_dist_typed_handle();
    return op.call(self, other, p);
}

// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
at::Tensor dist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
    
    static auto op = create_dist_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, p);
}

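// Usage sketch for aten::dist (illustrative; x and y stand for any two tensors
// of the same shape): the p-norm of the elementwise difference, so
// at::dist(x, y, p) matches (x - y).norm(p), with p defaulting to 2:
//
//   at::Tensor d = at::dist(x, y);  // scalar (0-dim) tensor
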
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_cts, name, "aten::_histogramdd_from_bin_cts")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_cts, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_cts, schema_str, "_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor")

// aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_from_bin_cts::schema> create__histogramdd_from_bin_cts_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_from_bin_cts::name, _histogramdd_from_bin_cts::overload_name)
      .typed<_histogramdd_from_bin_cts::schema>();
}

// aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
at::Tensor _histogramdd_from_bin_cts::call(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    
    static auto op = create__histogramdd_from_bin_cts_typed_handle();
    return op.call(self, bins, range, weight, density);
}

// aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
at::Tensor _histogramdd_from_bin_cts::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    
    static auto op = create__histogramdd_from_bin_cts_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Scalar_out, name, "aten::fmod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Scalar_out, schema_str, "fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmod_Scalar_out::schema> create_fmod_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmod_Scalar_out::name, fmod_Scalar_out::overload_name)
      .typed<fmod_Scalar_out::schema>();
}

// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmod_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_fmod_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmod_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_fmod_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Scalar, name, "aten::fmod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Scalar, schema_str, "fmod.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fmod_Scalar::schema> create_fmod_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmod_Scalar::name, fmod_Scalar::overload_name)
      .typed<fmod_Scalar::schema>();
}

// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor fmod_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_fmod_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor fmod_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_fmod_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod__Scalar, name, "aten::fmod_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod__Scalar, schema_str, "fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmod__Scalar::schema> create_fmod__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmod__Scalar::name, fmod__Scalar::overload_name)
      .typed<fmod__Scalar::schema>();
}

// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & fmod__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_fmod__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & fmod__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_fmod__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Tensor_out, name, "aten::fmod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Tensor_out, schema_str, "fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmod_Tensor_out::schema> create_fmod_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmod_Tensor_out::name, fmod_Tensor_out::overload_name)
      .typed<fmod_Tensor_out::schema>();
}

// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmod_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_fmod_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmod_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_fmod_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Tensor, name, "aten::fmod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod_Tensor, schema_str, "fmod.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fmod_Tensor::schema> create_fmod_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmod_Tensor::name, fmod_Tensor::overload_name)
      .typed<fmod_Tensor::schema>();
}

// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor fmod_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_fmod_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor fmod_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_fmod_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod__Tensor, name, "aten::fmod_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmod__Tensor, schema_str, "fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmod__Tensor::schema> create_fmod__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmod__Tensor::name, fmod__Tensor::overload_name)
      .typed<fmod__Tensor::schema>();
}

// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & fmod__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_fmod__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & fmod__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_fmod__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

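// Usage sketch for the aten::fmod family (illustrative): C-style remainder
// after truncated division, so the result takes the sign of `self`:
//
//   at::Tensor a = at::tensor({-3.0f, 3.0f});
//   at::Tensor r = at::fmod(a, 2.0);  // {-1, 1}
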
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_out, name, "aten::remainder")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_out, schema_str, "remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Scalar_out::schema> create_remainder_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Scalar_out::name, remainder_Scalar_out::overload_name)
      .typed<remainder_Scalar_out::schema>();
}

// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_remainder_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_remainder_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar, name, "aten::remainder")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar, schema_str, "remainder.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Scalar::schema> create_remainder_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Scalar::name, remainder_Scalar::overload_name)
      .typed<remainder_Scalar::schema>();
}

// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor remainder_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_remainder_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor remainder_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_remainder_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder__Scalar, name, "aten::remainder_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder__Scalar, schema_str, "remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder__Scalar::schema> create_remainder__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder__Scalar::name, remainder__Scalar::overload_name)
      .typed<remainder__Scalar::schema>();
}

// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & remainder__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_remainder__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & remainder__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_remainder__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Tensor_out, name, "aten::remainder")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Tensor_out, schema_str, "remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Tensor_out::schema> create_remainder_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Tensor_out::name, remainder_Tensor_out::overload_name)
      .typed<remainder_Tensor_out::schema>();
}

// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_remainder_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_remainder_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Tensor, name, "aten::remainder")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Tensor, schema_str, "remainder.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Tensor::schema> create_remainder_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Tensor::name, remainder_Tensor::overload_name)
      .typed<remainder_Tensor::schema>();
}

// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor remainder_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_remainder_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor remainder_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_remainder_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder__Tensor, name, "aten::remainder_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder__Tensor, schema_str, "remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder__Tensor::schema> create_remainder__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder__Tensor::name, remainder__Tensor::overload_name)
      .typed<remainder__Tensor::schema>();
}

// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & remainder__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_remainder__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & remainder__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_remainder__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_Tensor, name, "aten::remainder")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_Tensor, overload_name, "Scalar_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_Tensor, schema_str, "remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")

// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Scalar_Tensor::schema> create_remainder_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Scalar_Tensor::name, remainder_Scalar_Tensor::overload_name)
      .typed<remainder_Scalar_Tensor::schema>();
}

// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor remainder_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_remainder_Scalar_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor remainder_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_remainder_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

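// Usage sketch for the aten::remainder family (illustrative): Python-style
// modulus after floored division, so the result takes the sign of `other`
// (contrast with aten::fmod above):
//
//   at::Tensor a = at::tensor({-3.0f, 3.0f});
//   at::Tensor r = at::remainder(a, 2.0);  // {1, 1}
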
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile, name, "aten::nanquantile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile, schema_str, "nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor")

// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nanquantile::schema> create_nanquantile_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanquantile::name, nanquantile::overload_name)
      .typed<nanquantile::schema>();
}

// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor nanquantile::call(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_nanquantile_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation);
}

// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor nanquantile::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_nanquantile_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_out, name, "aten::nanquantile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_out, schema_str, "nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)")

// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nanquantile_out::schema> create_nanquantile_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanquantile_out::name, nanquantile_out::overload_name)
      .typed<nanquantile_out::schema>();
}

// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanquantile_out::call(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_nanquantile_out_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation, out);
}

// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanquantile_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_nanquantile_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_scalar, name, "aten::nanquantile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_scalar, overload_name, "scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_scalar, schema_str, "nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor")

// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nanquantile_scalar::schema> create_nanquantile_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanquantile_scalar::name, nanquantile_scalar::overload_name)
      .typed<nanquantile_scalar::schema>();
}

// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor nanquantile_scalar::call(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_nanquantile_scalar_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation);
}

// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor nanquantile_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_nanquantile_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_scalar_out, name, "aten::nanquantile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_scalar_out, overload_name, "scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanquantile_scalar_out, schema_str, "nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)")

// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nanquantile_scalar_out::schema> create_nanquantile_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanquantile_scalar_out::name, nanquantile_scalar_out::overload_name)
      .typed<nanquantile_scalar_out::schema>();
}

// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanquantile_scalar_out::call(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_nanquantile_scalar_out_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation, out);
}

// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanquantile_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_nanquantile_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out);
}

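// Usage sketch for aten::nanquantile (illustrative; NAN is the <cmath> macro):
// like quantile, except NaN values are ignored rather than propagated:
//
//   at::Tensor t = at::tensor({1.0f, NAN, 3.0f});
//   at::Tensor m = at::nanquantile(t, 0.5);  // 2.0, the median of {1, 3}
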
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any, name, "aten::any")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any, schema_str, "any(Tensor self) -> Tensor")

// aten::any(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<any::schema> create_any_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any::name, any::overload_name)
      .typed<any::schema>();
}

// aten::any(Tensor self) -> Tensor
at::Tensor any::call(const at::Tensor & self) {
    
    static auto op = create_any_typed_handle();
    return op.call(self);
}

// aten::any(Tensor self) -> Tensor
at::Tensor any::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_any_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_all_out, name, "aten::any")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_all_out, overload_name, "all_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(any_all_out, schema_str, "any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<any_all_out::schema> create_any_all_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_all_out::name, any_all_out::overload_name)
      .typed<any_all_out::schema>();
}

// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_all_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_any_all_out_typed_handle();
    return op.call(self, out);
}

// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_all_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_any_all_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

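// Usage sketch for the full-reduction aten::any overload (illustrative):
// collapses the whole tensor to a 0-dim bool that is true if any element is
// non-zero:
//
//   at::Tensor r = at::any(at::tensor({0, 0, 5}));  // true
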
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm_out, name, "aten::renorm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm_out, schema_str, "renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)")

// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<renorm_out::schema> create_renorm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(renorm_out::name, renorm_out::overload_name)
      .typed<renorm_out::schema>();
}

// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & renorm_out::call(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
    
    static auto op = create_renorm_out_typed_handle();
    return op.call(self, p, dim, maxnorm, out);
}

// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & renorm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
    
    static auto op = create_renorm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, dim, maxnorm, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm, name, "aten::renorm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm, schema_str, "renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor")

// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<renorm::schema> create_renorm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(renorm::name, renorm::overload_name)
      .typed<renorm::schema>();
}

// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
at::Tensor renorm::call(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
    
    static auto op = create_renorm_typed_handle();
    return op.call(self, p, dim, maxnorm);
}

// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
at::Tensor renorm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
    
    static auto op = create_renorm_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, dim, maxnorm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm_, name, "aten::renorm_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(renorm_, schema_str, "renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)")

// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<renorm_::schema> create_renorm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(renorm_::name, renorm_::overload_name)
      .typed<renorm_::schema>();
}

// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
at::Tensor & renorm_::call(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
    
    static auto op = create_renorm__typed_handle();
    return op.call(self, p, dim, maxnorm);
}

// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
at::Tensor & renorm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
    
    static auto op = create_renorm__typed_handle();
    return op.redispatch(dispatchKeySet, self, p, dim, maxnorm);
}

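// Usage sketch for aten::renorm (illustrative): rescales each slice along
// `dim` whose p-norm exceeds `maxnorm` down to exactly `maxnorm`, leaving the
// other slices untouched:
//
//   at::Tensor x = at::rand({3, 4});
//   at::Tensor y = at::renorm(x, /*p=*/2, /*dim=*/0, /*maxnorm=*/1.0);  // row norms <= 1
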
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold, name, "aten::unfold")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold, schema_str, "unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)")

// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<unfold::schema> create_unfold_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unfold::name, unfold::overload_name)
      .typed<unfold::schema>();
}

// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
at::Tensor unfold::call(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
    
    static auto op = create_unfold_typed_handle();
    return op.call(self, dimension, size, step);
}

// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
at::Tensor unfold::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
    
    static auto op = create_unfold_typed_handle();
    return op.redispatch(dispatchKeySet, self, dimension, size, step);
}

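// Usage sketch for aten::unfold (illustrative): a view (note the Tensor(a)
// annotation in the schema) of sliding windows of length `size` taken every
// `step` elements along `dimension`:
//
//   at::Tensor t = at::arange(7);
//   at::Tensor w = t.unfold(/*dimension=*/0, /*size=*/3, /*step=*/2);  // shape [3, 3]
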
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Tensor_out, name, "aten::float_power")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Tensor_out, overload_name, "Tensor_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Tensor_out, schema_str, "float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")

// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Tensor_Tensor_out::schema> create_float_power_Tensor_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(float_power_Tensor_Tensor_out::name, float_power_Tensor_Tensor_out::overload_name)
      .typed<float_power_Tensor_Tensor_out::schema>();
}

// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Tensor_Tensor_out::call(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    
    static auto op = create_float_power_Tensor_Tensor_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    
    static auto op = create_float_power_Tensor_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Tensor, name, "aten::float_power")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Tensor, overload_name, "Tensor_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Tensor, schema_str, "float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor")

// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Tensor_Tensor::schema> create_float_power_Tensor_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(float_power_Tensor_Tensor::name, float_power_Tensor_Tensor::overload_name)
      .typed<float_power_Tensor_Tensor::schema>();
}

// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
at::Tensor float_power_Tensor_Tensor::call(const at::Tensor & self, const at::Tensor & exponent) {
    
    static auto op = create_float_power_Tensor_Tensor_typed_handle();
    return op.call(self, exponent);
}

// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
at::Tensor float_power_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent) {
    
    static auto op = create_float_power_Tensor_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Scalar_out, name, "aten::float_power")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Scalar_out, schema_str, "float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")

// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Scalar_out::schema> create_float_power_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(float_power_Scalar_out::name, float_power_Scalar_out::overload_name)
      .typed<float_power_Scalar_out::schema>();
}

// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Scalar_out::call(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    
    static auto op = create_float_power_Scalar_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    
    static auto op = create_float_power_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Scalar, name, "aten::float_power")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Scalar, schema_str, "float_power.Scalar(Scalar self, Tensor exponent) -> Tensor")

// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Scalar::schema> create_float_power_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(float_power_Scalar::name, float_power_Scalar::overload_name)
      .typed<float_power_Scalar::schema>();
}

// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
at::Tensor float_power_Scalar::call(const at::Scalar & self, const at::Tensor & exponent) {
    
    static auto op = create_float_power_Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
at::Tensor float_power_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent) {
    
    static auto op = create_float_power_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Scalar_out, name, "aten::float_power")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Scalar_out, overload_name, "Tensor_Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Scalar_out, schema_str, "float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)")

// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Tensor_Scalar_out::schema> create_float_power_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(float_power_Tensor_Scalar_out::name, float_power_Tensor_Scalar_out::overload_name)
      .typed<float_power_Tensor_Scalar_out::schema>();
}

// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Tensor_Scalar_out::call(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    
    static auto op = create_float_power_Tensor_Scalar_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    
    static auto op = create_float_power_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Scalar, name, "aten::float_power")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Scalar, overload_name, "Tensor_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power_Tensor_Scalar, schema_str, "float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor")

// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Tensor_Scalar::schema> create_float_power_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(float_power_Tensor_Scalar::name, float_power_Tensor_Scalar::overload_name)
      .typed<float_power_Tensor_Scalar::schema>();
}

// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
at::Tensor float_power_Tensor_Scalar::call(const at::Tensor & self, const at::Scalar & exponent) {
    
    static auto op = create_float_power_Tensor_Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
at::Tensor float_power_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent) {
    
    static auto op = create_float_power_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

7895STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power__Scalar, name, "aten::float_power_")
7896STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power__Scalar, overload_name, "Scalar")
7897STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power__Scalar, schema_str, "float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)")
7898
7899// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
7900static C10_NOINLINE c10::TypedOperatorHandle<float_power__Scalar::schema> create_float_power__Scalar_typed_handle() {
7901 return c10::Dispatcher::singleton()
7902 .findSchemaOrThrow(float_power__Scalar::name, float_power__Scalar::overload_name)
7903 .typed<float_power__Scalar::schema>();
7904}
7905
7906// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
7907at::Tensor & float_power__Scalar::call(at::Tensor & self, const at::Scalar & exponent) {
7908
7909 static auto op = create_float_power__Scalar_typed_handle();
7910 return op.call(self, exponent);
7911}
7912
7913// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
7914at::Tensor & float_power__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) {
7915
7916 static auto op = create_float_power__Scalar_typed_handle();
7917 return op.redispatch(dispatchKeySet, self, exponent);
7918}
7919
7920STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power__Tensor, name, "aten::float_power_")
7921STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power__Tensor, overload_name, "Tensor")
7922STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(float_power__Tensor, schema_str, "float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)")
7923
7924// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
7925static C10_NOINLINE c10::TypedOperatorHandle<float_power__Tensor::schema> create_float_power__Tensor_typed_handle() {
7926 return c10::Dispatcher::singleton()
7927 .findSchemaOrThrow(float_power__Tensor::name, float_power__Tensor::overload_name)
7928 .typed<float_power__Tensor::schema>();
7929}
7930
7931// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
7932at::Tensor & float_power__Tensor::call(at::Tensor & self, const at::Tensor & exponent) {
7933
7934 static auto op = create_float_power__Tensor_typed_handle();
7935 return op.call(self, exponent);
7936}
7937
7938// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
7939at::Tensor & float_power__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) {
7940
7941 static auto op = create_float_power__Tensor_typed_handle();
7942 return op.redispatch(dispatchKeySet, self, exponent);
7943}
7944
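// Usage sketch (illustrative, hand-written rather than torchgen output; assumes
// the public API from <ATen/ops/float_power.h>): these wrappers are what a
// user-level call routes through. float_power always computes in double (or
// complex double) precision, hence the Scalar/Tensor overload combinations and
// the in-place variants above.
//
//   at::Tensor exponent = at::arange(4, at::kDouble);        // [0, 1, 2, 3]
//   at::Tensor r = at::float_power(/*self=*/2.0, exponent);  // [1, 2, 4, 8]
//   at::Tensor out = at::empty({4}, at::kDouble);
//   at::float_power_out(out, 2.0, exponent);                 // writes into `out`
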
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_Scalar, name, "aten::_foreach_clamp_max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_Scalar, schema_str, "_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")

// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_Scalar::schema> create__foreach_clamp_max_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max_Scalar::name, _foreach_clamp_max_Scalar::overload_name)
      .typed<_foreach_clamp_max_Scalar::schema>();
}

// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_clamp_max_Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_clamp_max_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__Scalar, name, "aten::_foreach_clamp_max_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__Scalar, schema_str, "_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")

// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max__Scalar::schema> create__foreach_clamp_max__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max__Scalar::name, _foreach_clamp_max__Scalar::overload_name)
      .typed<_foreach_clamp_max__Scalar::schema>();
}

// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_clamp_max__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_clamp_max__Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_clamp_max__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_clamp_max__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_List, name, "aten::_foreach_clamp_max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_List, schema_str, "_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]")

// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_List::schema> create__foreach_clamp_max_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max_List::name, _foreach_clamp_max_List::overload_name)
      .typed<_foreach_clamp_max_List::schema>();
}

// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_clamp_max_List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_clamp_max_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__List, name, "aten::_foreach_clamp_max_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__List, schema_str, "_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()")

// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max__List::schema> create__foreach_clamp_max__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max__List::name, _foreach_clamp_max__List::overload_name)
      .typed<_foreach_clamp_max__List::schema>();
}

// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_clamp_max__List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_clamp_max__List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_clamp_max__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_clamp_max__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_ScalarList, name, "aten::_foreach_clamp_max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_ScalarList, schema_str, "_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")

// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_ScalarList::schema> create__foreach_clamp_max_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max_ScalarList::name, _foreach_clamp_max_ScalarList::overload_name)
      .typed<_foreach_clamp_max_ScalarList::schema>();
}

// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_clamp_max_ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_clamp_max_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__ScalarList, name, "aten::_foreach_clamp_max_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max__ScalarList, schema_str, "_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")

// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max__ScalarList::schema> create__foreach_clamp_max__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max__ScalarList::name, _foreach_clamp_max__ScalarList::overload_name)
      .typed<_foreach_clamp_max__ScalarList::schema>();
}

// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_clamp_max__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_clamp_max__ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_clamp_max__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_clamp_max__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

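// Usage sketch (illustrative, hand-written rather than torchgen output): the
// _foreach_* family applies one op across an entire TensorList, letting
// backends fuse the loop (e.g. a single kernel launch on CUDA). The List
// overload expects other.size() == self.size(); the in-place forms return void.
//
//   std::vector<at::Tensor> params = {at::randn({3}), at::randn({5})};
//   auto clamped = at::_foreach_clamp_max(params, /*scalar=*/1.0);  // new tensors
//   at::_foreach_clamp_max_(params, 1.0);                           // mutates params
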
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs, name, "aten::_foreach_abs")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs, schema_str, "_foreach_abs(Tensor[] self) -> Tensor[]")

// aten::_foreach_abs(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_abs::schema> create__foreach_abs_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_abs::name, _foreach_abs::overload_name)
      .typed<_foreach_abs::schema>();
}

// aten::_foreach_abs(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_abs::call(at::TensorList self) {
    static auto op = create__foreach_abs_typed_handle();
    return op.call(self);
}

// aten::_foreach_abs(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_abs::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_abs_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs_, name, "aten::_foreach_abs_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs_, schema_str, "_foreach_abs_(Tensor(a!)[] self) -> ()")

// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_abs_::schema> create__foreach_abs__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_abs_::name, _foreach_abs_::overload_name)
      .typed<_foreach_abs_::schema>();
}

// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
void _foreach_abs_::call(at::TensorList self) {
    static auto op = create__foreach_abs__typed_handle();
    return op.call(self);
}

// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
void _foreach_abs_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_abs__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1, name, "aten::_foreach_expm1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1, schema_str, "_foreach_expm1(Tensor[] self) -> Tensor[]")

// aten::_foreach_expm1(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_expm1::schema> create__foreach_expm1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_expm1::name, _foreach_expm1::overload_name)
      .typed<_foreach_expm1::schema>();
}

// aten::_foreach_expm1(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_expm1::call(at::TensorList self) {
    static auto op = create__foreach_expm1_typed_handle();
    return op.call(self);
}

// aten::_foreach_expm1(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_expm1::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_expm1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1_, name, "aten::_foreach_expm1_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1_, schema_str, "_foreach_expm1_(Tensor(a!)[] self) -> ()")

// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_expm1_::schema> create__foreach_expm1__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_expm1_::name, _foreach_expm1_::overload_name)
      .typed<_foreach_expm1_::schema>();
}

// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
void _foreach_expm1_::call(at::TensorList self) {
    static auto op = create__foreach_expm1__typed_handle();
    return op.call(self);
}

// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
void _foreach_expm1_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_expm1__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10, name, "aten::_foreach_log10")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10, schema_str, "_foreach_log10(Tensor[] self) -> Tensor[]")

// aten::_foreach_log10(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log10::schema> create__foreach_log10_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log10::name, _foreach_log10::overload_name)
      .typed<_foreach_log10::schema>();
}

// aten::_foreach_log10(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log10::call(at::TensorList self) {
    static auto op = create__foreach_log10_typed_handle();
    return op.call(self);
}

// aten::_foreach_log10(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log10::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_log10_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10_, name, "aten::_foreach_log10_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10_, schema_str, "_foreach_log10_(Tensor(a!)[] self) -> ()")

// aten::_foreach_log10_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log10_::schema> create__foreach_log10__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log10_::name, _foreach_log10_::overload_name)
      .typed<_foreach_log10_::schema>();
}

// aten::_foreach_log10_(Tensor(a!)[] self) -> ()
void _foreach_log10_::call(at::TensorList self) {
    static auto op = create__foreach_log10__typed_handle();
    return op.call(self);
}

// aten::_foreach_log10_(Tensor(a!)[] self) -> ()
void _foreach_log10_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_log10__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan, name, "aten::_foreach_tan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan, schema_str, "_foreach_tan(Tensor[] self) -> Tensor[]")

// aten::_foreach_tan(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tan::schema> create__foreach_tan_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_tan::name, _foreach_tan::overload_name)
      .typed<_foreach_tan::schema>();
}

// aten::_foreach_tan(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_tan::call(at::TensorList self) {
    static auto op = create__foreach_tan_typed_handle();
    return op.call(self);
}

// aten::_foreach_tan(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_tan::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_tan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan_, name, "aten::_foreach_tan_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan_, schema_str, "_foreach_tan_(Tensor(a!)[] self) -> ()")

// aten::_foreach_tan_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tan_::schema> create__foreach_tan__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_tan_::name, _foreach_tan_::overload_name)
      .typed<_foreach_tan_::schema>();
}

// aten::_foreach_tan_(Tensor(a!)[] self) -> ()
void _foreach_tan_::call(at::TensorList self) {
    static auto op = create__foreach_tan__typed_handle();
    return op.call(self);
}

// aten::_foreach_tan_(Tensor(a!)[] self) -> ()
void _foreach_tan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_tan__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh, name, "aten::_foreach_sinh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh, schema_str, "_foreach_sinh(Tensor[] self) -> Tensor[]")

// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sinh::schema> create__foreach_sinh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sinh::name, _foreach_sinh::overload_name)
      .typed<_foreach_sinh::schema>();
}

// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sinh::call(at::TensorList self) {
    static auto op = create__foreach_sinh_typed_handle();
    return op.call(self);
}

// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sinh::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_sinh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh_, name, "aten::_foreach_sinh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh_, schema_str, "_foreach_sinh_(Tensor(a!)[] self) -> ()")

// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sinh_::schema> create__foreach_sinh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sinh_::name, _foreach_sinh_::overload_name)
      .typed<_foreach_sinh_::schema>();
}

// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
void _foreach_sinh_::call(at::TensorList self) {
    static auto op = create__foreach_sinh__typed_handle();
    return op.call(self);
}

// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
void _foreach_sinh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_sinh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

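// Usage sketch (illustrative, hand-written rather than torchgen output):
// _foreach_abs, _foreach_expm1, _foreach_log10, _foreach_tan and _foreach_sinh
// above all share the same unary shape: the functional form returns a new
// ::std::vector<at::Tensor>, the trailing-underscore form mutates the
// TensorList in place and returns void.
//
//   std::vector<at::Tensor> grads = {at::randn({2, 2}), at::randn({5})};
//   auto mags = at::_foreach_abs(grads);   // out-of-place
//   at::_foreach_tan_(grads);              // in-place
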
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Tensor, name, "aten::searchsorted")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Tensor, schema_str, "searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor")

// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<searchsorted_Tensor::schema> create_searchsorted_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(searchsorted_Tensor::name, searchsorted_Tensor::overload_name)
      .typed<searchsorted_Tensor::schema>();
}

// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
at::Tensor searchsorted_Tensor::call(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
    static auto op = create_searchsorted_Tensor_typed_handle();
    return op.call(sorted_sequence, self, out_int32, right, side, sorter);
}

// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
at::Tensor searchsorted_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
    static auto op = create_searchsorted_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Tensor_out, name, "aten::searchsorted")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Tensor_out, schema_str, "searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)")

// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<searchsorted_Tensor_out::schema> create_searchsorted_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(searchsorted_Tensor_out::name, searchsorted_Tensor_out::overload_name)
      .typed<searchsorted_Tensor_out::schema>();
}

// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & searchsorted_Tensor_out::call(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
    static auto op = create_searchsorted_Tensor_out_typed_handle();
    return op.call(sorted_sequence, self, out_int32, right, side, sorter, out);
}

// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & searchsorted_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
    static auto op = create_searchsorted_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Scalar, name, "aten::searchsorted")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Scalar, schema_str, "searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor")

// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<searchsorted_Scalar::schema> create_searchsorted_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(searchsorted_Scalar::name, searchsorted_Scalar::overload_name)
      .typed<searchsorted_Scalar::schema>();
}

// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
at::Tensor searchsorted_Scalar::call(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
    static auto op = create_searchsorted_Scalar_typed_handle();
    return op.call(sorted_sequence, self, out_int32, right, side, sorter);
}

// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
at::Tensor searchsorted_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
    static auto op = create_searchsorted_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter);
}

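// Usage sketch (illustrative, hand-written rather than torchgen output; mirrors
// torch.searchsorted in Python): `sorted_sequence` must be sorted along its
// innermost dimension; right=true behaves like side="right" and returns the
// upper-bound insertion point; out_int32=true yields int32 indices instead of
// the default int64.
//
//   at::Tensor seq = at::arange(10, at::kDouble);      // 0..9, sorted
//   at::Tensor vals = at::full({2}, 3.5, at::kDouble);
//   at::Tensor idx = at::searchsorted(seq, vals,
//       /*out_int32=*/false, /*right=*/true);          // -> [4, 4]
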
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_out, name, "aten::smooth_l1_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_out, schema_str, "smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<smooth_l1_loss_out::schema> create_smooth_l1_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smooth_l1_loss_out::name, smooth_l1_loss_out::overload_name)
      .typed<smooth_l1_loss_out::schema>();
}

// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & smooth_l1_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
    static auto op = create_smooth_l1_loss_out_typed_handle();
    return op.call(self, target, reduction, beta, out);
}

// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & smooth_l1_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
    static auto op = create_smooth_l1_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, beta, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss, name, "aten::smooth_l1_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss, schema_str, "smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor")

// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<smooth_l1_loss::schema> create_smooth_l1_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smooth_l1_loss::name, smooth_l1_loss::overload_name)
      .typed<smooth_l1_loss::schema>();
}

// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
at::Tensor smooth_l1_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
    static auto op = create_smooth_l1_loss_typed_handle();
    return op.call(self, target, reduction, beta);
}

// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
at::Tensor smooth_l1_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
    static auto op = create_smooth_l1_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, beta);
}

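// Usage sketch (illustrative, hand-written rather than torchgen output):
// `reduction` takes the at::Reduction enum (None/Mean/Sum) and `beta` is the
// cutover point between the quadratic and linear regimes of the loss.
//
//   at::Tensor pred = at::randn({8});
//   at::Tensor target = at::randn({8});
//   at::Tensor loss = at::smooth_l1_loss(pred, target,
//       at::Reduction::Mean, /*beta=*/1.0);
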
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_out, name, "aten::elu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_out, schema_str, "elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<elu_out::schema> create_elu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu_out::name, elu_out::overload_name)
      .typed<elu_out::schema>();
}

// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & elu_out::call(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
    static auto op = create_elu_out_typed_handle();
    return op.call(self, alpha, scale, input_scale, out);
}

// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & elu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
    static auto op = create_elu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha, scale, input_scale, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu, name, "aten::elu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu, schema_str, "elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor")

// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<elu::schema> create_elu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu::name, elu::overload_name)
      .typed<elu::schema>();
}

// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
at::Tensor elu::call(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
    static auto op = create_elu_typed_handle();
    return op.call(self, alpha, scale, input_scale);
}

// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
at::Tensor elu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
    static auto op = create_elu_typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha, scale, input_scale);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_, name, "aten::elu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_, schema_str, "elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)")

// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<elu_::schema> create_elu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu_::name, elu_::overload_name)
      .typed<elu_::schema>();
}

// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
at::Tensor & elu_::call(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
    static auto op = create_elu__typed_handle();
    return op.call(self, alpha, scale, input_scale);
}

// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
at::Tensor & elu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
    static auto op = create_elu__typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha, scale, input_scale);
}

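// Usage sketch (illustrative, hand-written rather than torchgen output): elu
// comes in the usual functional / out / in-place triple; the defaults
// alpha = scale = input_scale = 1 give the standard ELU.
//
//   at::Tensor x = at::randn({4});
//   at::Tensor y = at::elu(x);            // functional
//   at::Tensor out = at::empty_like(x);
//   at::elu_out(out, x);                  // writes into `out`
//   at::elu_(x);                          // mutates x
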
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_grad_input, name, "aten::glu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_grad_input, schema_str, "glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<glu_backward_grad_input::schema> create_glu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_backward_grad_input::name, glu_backward_grad_input::overload_name)
      .typed<glu_backward_grad_input::schema>();
}

// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & glu_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
    static auto op = create_glu_backward_grad_input_typed_handle();
    return op.call(grad_output, self, dim, grad_input);
}

// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & glu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
    static auto op = create_glu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, dim, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward, name, "aten::glu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward, schema_str, "glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor")

// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<glu_backward::schema> create_glu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_backward::name, glu_backward::overload_name)
      .typed<glu_backward::schema>();
}

// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
at::Tensor glu_backward::call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
    static auto op = create_glu_backward_typed_handle();
    return op.call(grad_output, self, dim);
}

// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
at::Tensor glu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
    static auto op = create_glu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_backward_grad_input, name, "aten::hardtanh_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_backward_grad_input, schema_str, "hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh_backward_grad_input::schema> create_hardtanh_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh_backward_grad_input::name, hardtanh_backward_grad_input::overload_name)
      .typed<hardtanh_backward_grad_input::schema>();
}

// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & hardtanh_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
    static auto op = create_hardtanh_backward_grad_input_typed_handle();
    return op.call(grad_output, self, min_val, max_val, grad_input);
}

// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & hardtanh_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
    static auto op = create_hardtanh_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, min_val, max_val, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_backward, name, "aten::hardtanh_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_backward, schema_str, "hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor")

// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh_backward::schema> create_hardtanh_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh_backward::name, hardtanh_backward::overload_name)
      .typed<hardtanh_backward::schema>();
}

// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
at::Tensor hardtanh_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    static auto op = create_hardtanh_backward_typed_handle();
    return op.call(grad_output, self, min_val, max_val);
}

// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
at::Tensor hardtanh_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    static auto op = create_hardtanh_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, min_val, max_val);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_backward_grad_input, name, "aten::leaky_relu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_backward_grad_input, schema_str, "leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu_backward_grad_input::schema> create_leaky_relu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu_backward_grad_input::name, leaky_relu_backward_grad_input::overload_name)
      .typed<leaky_relu_backward_grad_input::schema>();
}

// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & leaky_relu_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
    static auto op = create_leaky_relu_backward_grad_input_typed_handle();
    return op.call(grad_output, self, negative_slope, self_is_result, grad_input);
}

// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & leaky_relu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
    static auto op = create_leaky_relu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_backward, name, "aten::leaky_relu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_backward, schema_str, "leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor")

// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu_backward::schema> create_leaky_relu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu_backward::name, leaky_relu_backward::overload_name)
      .typed<leaky_relu_backward::schema>();
}

// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
at::Tensor leaky_relu_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
    static auto op = create_leaky_relu_backward_typed_handle();
    return op.call(grad_output, self, negative_slope, self_is_result);
}

// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
at::Tensor leaky_relu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
    static auto op = create_leaky_relu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result);
}

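// Usage sketch (illustrative, hand-written rather than torchgen output):
// glu_backward, hardtanh_backward and leaky_relu_backward follow the common
// backward convention of mapping grad_output (plus the forward input) to
// grad_input, with a .grad_input overload that writes into a preallocated
// tensor. Autograd normally invokes these; a direct call looks like:
//
//   at::Tensor x = at::randn({4, 6});
//   at::Tensor grad_out = at::ones({4, 3});                 // glu halves dim 1
//   at::Tensor grad_in = at::glu_backward(grad_out, x, /*dim=*/1);
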
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_out, name, "aten::softplus")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_out, schema_str, "softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)")

// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<softplus_out::schema> create_softplus_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softplus_out::name, softplus_out::overload_name)
      .typed<softplus_out::schema>();
}

// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softplus_out::call(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
    static auto op = create_softplus_out_typed_handle();
    return op.call(self, beta, threshold, out);
}

// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softplus_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
    static auto op = create_softplus_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, beta, threshold, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus, name, "aten::softplus")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus, schema_str, "softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor")

// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<softplus::schema> create_softplus_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softplus::name, softplus::overload_name)
      .typed<softplus::schema>();
}

// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
at::Tensor softplus::call(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
    static auto op = create_softplus_typed_handle();
    return op.call(self, beta, threshold);
}

// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
at::Tensor softplus::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
    static auto op = create_softplus_typed_handle();
    return op.redispatch(dispatchKeySet, self, beta, threshold);
}

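// Usage sketch (illustrative, hand-written rather than torchgen output):
// softplus computes (1/beta) * log(1 + exp(beta * x)); where beta * x exceeds
// `threshold` the kernel falls back to the identity for numerical stability.
//
//   at::Tensor x = at::randn({4});
//   at::Tensor y = at::softplus(x, /*beta=*/1, /*threshold=*/20);
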
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d, name, "aten::mkldnn_adaptive_avg_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d, schema_str, "mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor")

// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_adaptive_avg_pool2d::schema> create_mkldnn_adaptive_avg_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_adaptive_avg_pool2d::name, mkldnn_adaptive_avg_pool2d::overload_name)
      .typed<mkldnn_adaptive_avg_pool2d::schema>();
}

// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
at::Tensor mkldnn_adaptive_avg_pool2d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    static auto op = create_mkldnn_adaptive_avg_pool2d_typed_handle();
    return op.call(self, output_size);
}

// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
at::Tensor mkldnn_adaptive_avg_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    static auto op = create_mkldnn_adaptive_avg_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_out, name, "aten::mkldnn_adaptive_avg_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_out, schema_str, "mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_adaptive_avg_pool2d_out::schema> create_mkldnn_adaptive_avg_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_adaptive_avg_pool2d_out::name, mkldnn_adaptive_avg_pool2d_out::overload_name)
      .typed<mkldnn_adaptive_avg_pool2d_out::schema>();
}

// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_adaptive_avg_pool2d_out::call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
    static auto op = create_mkldnn_adaptive_avg_pool2d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_adaptive_avg_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
    static auto op = create_mkldnn_adaptive_avg_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool2d, name, "aten::_adaptive_avg_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool2d, schema_str, "_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor")

// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool2d::schema> create__adaptive_avg_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool2d::name, _adaptive_avg_pool2d::overload_name)
      .typed<_adaptive_avg_pool2d::schema>();
}

// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
at::Tensor _adaptive_avg_pool2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    static auto op = create__adaptive_avg_pool2d_typed_handle();
    return op.call(self, output_size);
}

// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
at::Tensor _adaptive_avg_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) {
    static auto op = create__adaptive_avg_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

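// Usage sketch (illustrative, hand-written rather than torchgen output):
// _adaptive_avg_pool2d is the SymInt-aware primitive behind the public
// at::adaptive_avg_pool2d, which fixes the output's spatial shape regardless
// of the input's H and W; the mkldnn variant above expects its input in
// mkldnn layout.
//
//   at::Tensor img = at::randn({1, 3, 17, 23});
//   at::Tensor pooled = at::adaptive_avg_pool2d(img, {5, 5});  // -> {1, 3, 5, 5}
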
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_out, name, "aten::avg_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_out, schema_str, "avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool3d_out::schema> create_avg_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool3d_out::name, avg_pool3d_out::overload_name)
      .typed<avg_pool3d_out::schema>();
}

// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & avg_pool3d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
    static auto op = create_avg_pool3d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}

// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & avg_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
    static auto op = create_avg_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d, name, "aten::avg_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d, schema_str, "avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor")

// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool3d::schema> create_avg_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool3d::name, avg_pool3d::overload_name)
      .typed<avg_pool3d::schema>();
}

// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
at::Tensor avg_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
    static auto op = create_avg_pool3d_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
at::Tensor avg_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
8865
8866 static auto op = create_avg_pool3d_typed_handle();
8867 return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
8868}
8869
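// Editorial usage sketch (not generated code): at this layer the functional
// overload has no defaults, so a direct call spells out every argument and
// passes c10::nullopt for the optional divisor_override. Shapes below are
// illustrative assumptions.
//
//   at::Tensor self = at::randn({1, 1, 8, 8, 8});
//   at::Tensor y = at::_ops::avg_pool3d::call(
//       self, /*kernel_size=*/{2, 2, 2}, /*stride=*/{2, 2, 2},
//       /*padding=*/{0, 0, 0}, /*ceil_mode=*/false,
//       /*count_include_pad=*/true, /*divisor_override=*/c10::nullopt);
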
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_backward_grad_input, name, "aten::avg_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_backward_grad_input, schema_str, "avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool3d_backward_grad_input::schema> create_avg_pool3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool3d_backward_grad_input::name, avg_pool3d_backward_grad_input::overload_name)
      .typed<avg_pool3d_backward_grad_input::schema>();
}

// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & avg_pool3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {

    static auto op = create_avg_pool3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}

// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & avg_pool3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {

    static auto op = create_avg_pool3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_backward, name, "aten::avg_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool3d_backward, schema_str, "avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor")

// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool3d_backward::schema> create_avg_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool3d_backward::name, avg_pool3d_backward::overload_name)
      .typed<avg_pool3d_backward::schema>();
}

// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
at::Tensor avg_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {

    static auto op = create_avg_pool3d_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
at::Tensor avg_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {

    static auto op = create_avg_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_backward_grad_input, name, "aten::max_pool2d_with_indices_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_backward_grad_input, schema_str, "max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_with_indices_backward_grad_input::schema> create_max_pool2d_with_indices_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_with_indices_backward_grad_input::name, max_pool2d_with_indices_backward_grad_input::overload_name)
      .typed<max_pool2d_with_indices_backward_grad_input::schema>();
}

// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & max_pool2d_with_indices_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {

    static auto op = create_max_pool2d_with_indices_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & max_pool2d_with_indices_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {

    static auto op = create_max_pool2d_with_indices_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_backward, name, "aten::max_pool2d_with_indices_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_backward, schema_str, "max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor")

// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_with_indices_backward::schema> create_max_pool2d_with_indices_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_with_indices_backward::name, max_pool2d_with_indices_backward::overload_name)
      .typed<max_pool2d_with_indices_backward::schema>();
}

// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
at::Tensor max_pool2d_with_indices_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {

    static auto op = create_max_pool2d_with_indices_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
at::Tensor max_pool2d_with_indices_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {

    static auto op = create_max_pool2d_with_indices_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d_with_indices_out, name, "aten::max_pool3d_with_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d_with_indices_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d_with_indices_out, schema_str, "max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")

// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d_with_indices_out::schema> create_max_pool3d_with_indices_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d_with_indices_out::name, max_pool3d_with_indices_out::overload_name)
      .typed<max_pool3d_with_indices_out::schema>();
}

// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {

    static auto op = create_max_pool3d_with_indices_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}

// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {

    static auto op = create_max_pool3d_with_indices_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d_with_indices, name, "aten::max_pool3d_with_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d_with_indices, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool3d_with_indices, schema_str, "max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)")

// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d_with_indices::schema> create_max_pool3d_with_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d_with_indices::name, max_pool3d_with_indices::overload_name)
      .typed<max_pool3d_with_indices::schema>();
}

// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool3d_with_indices_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool3d_with_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

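// Editorial sketch: multi-output ops such as max_pool3d_with_indices return a
// ::std::tuple, which callers typically unpack with structured bindings or
// std::tie. An empty stride list mirrors the schema default (stride=[] means
// "use kernel_size"); values below are illustrative assumptions.
//
//   at::Tensor x = at::randn({1, 1, 8, 8, 8});
//   auto [vals, idx] = at::_ops::max_pool3d_with_indices::call(
//       x, /*kernel_size=*/{2, 2, 2}, /*stride=*/{}, /*padding=*/{0, 0, 0},
//       /*dilation=*/{1, 1, 1}, /*ceil_mode=*/false);
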
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad2d_out, name, "aten::reflection_pad2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad2d_out, schema_str, "reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)")

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad2d_out::schema> create_reflection_pad2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad2d_out::name, reflection_pad2d_out::overload_name)
      .typed<reflection_pad2d_out::schema>();
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & reflection_pad2d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {

    static auto op = create_reflection_pad2d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & reflection_pad2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {

    static auto op = create_reflection_pad2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad2d, name, "aten::reflection_pad2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad2d, schema_str, "reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor")

// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad2d::schema> create_reflection_pad2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad2d::name, reflection_pad2d::overload_name)
      .typed<reflection_pad2d::schema>();
}

// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
at::Tensor reflection_pad2d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {

    static auto op = create_reflection_pad2d_typed_handle();
    return op.call(self, padding);
}

// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
at::Tensor reflection_pad2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {

    static auto op = create_reflection_pad2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

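// Editorial sketch: reflection_pad2d is SymInt-enabled, so padding arrives as
// c10::SymIntArrayRef. Concrete integer literals still convert implicitly
// (c10::SymInt has an implicit int64_t constructor), so an eager caller can
// pass a plain braced list; shapes are illustrative assumptions.
//
//   at::Tensor img = at::randn({1, 3, 8, 8});
//   at::Tensor padded =
//       at::_ops::reflection_pad2d::call(img, /*padding=*/{1, 1, 2, 2});
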
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_vec, name, "aten::_upsample_bilinear2d_aa")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_vec, overload_name, "vec")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_vec, schema_str, "_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")

// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa_vec::schema> create__upsample_bilinear2d_aa_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bilinear2d_aa_vec::name, _upsample_bilinear2d_aa_vec::overload_name)
      .typed<_upsample_bilinear2d_aa_vec::schema>();
}

// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor _upsample_bilinear2d_aa_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create__upsample_bilinear2d_aa_vec_typed_handle();
    return op.call(input, output_size, align_corners, scale_factors);
}

// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor _upsample_bilinear2d_aa_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create__upsample_bilinear2d_aa_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

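// Editorial sketch (assumption-labelled, not generated code): the .vec
// overload takes two optional arrays, of which callers normally provide
// exactly one. A std::vector<double> converts implicitly to
// c10::optional<at::ArrayRef<double>>, and c10::nullopt fills the unused
// at::OptionalSymIntArrayRef slot.
//
//   at::Tensor input = at::randn({1, 3, 16, 16});
//   std::vector<double> scale_factors = {2.0, 2.0};
//   at::Tensor up = at::_ops::_upsample_bilinear2d_aa_vec::call(
//       input, /*output_size=*/c10::nullopt, /*align_corners=*/false,
//       scale_factors);
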
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_backward_grad_input, name, "aten::upsample_linear1d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_backward_grad_input, schema_str, "upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d_backward_grad_input::schema> create_upsample_linear1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_linear1d_backward_grad_input::name, upsample_linear1d_backward_grad_input::overload_name)
      .typed<upsample_linear1d_backward_grad_input::schema>();
}

// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_linear1d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {

    static auto op = create_upsample_linear1d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales, grad_input);
}

// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_linear1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {

    static auto op = create_upsample_linear1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_backward, name, "aten::upsample_linear1d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_backward, schema_str, "upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor")

// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d_backward::schema> create_upsample_linear1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_linear1d_backward::name, upsample_linear1d_backward::overload_name)
      .typed<upsample_linear1d_backward::schema>();
}

// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
at::Tensor upsample_linear1d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {

    static auto op = create_upsample_linear1d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales);
}

// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
at::Tensor upsample_linear1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {

    static auto op = create_upsample_linear1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_out, name, "aten::_upsample_bilinear2d_aa")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_out, schema_str, "_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa_out::schema> create__upsample_bilinear2d_aa_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bilinear2d_aa_out::name, _upsample_bilinear2d_aa_out::overload_name)
      .typed<_upsample_bilinear2d_aa_out::schema>();
}

// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_bilinear2d_aa_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {

    static auto op = create__upsample_bilinear2d_aa_out_typed_handle();
    return op.call(self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_bilinear2d_aa_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {

    static auto op = create__upsample_bilinear2d_aa_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa, name, "aten::_upsample_bilinear2d_aa")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa, schema_str, "_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")

// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa::schema> create__upsample_bilinear2d_aa_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bilinear2d_aa::name, _upsample_bilinear2d_aa::overload_name)
      .typed<_upsample_bilinear2d_aa::schema>();
}

// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bilinear2d_aa::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create__upsample_bilinear2d_aa_typed_handle();
    return op.call(self, output_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bilinear2d_aa::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create__upsample_bilinear2d_aa_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_backward_grad_input, name, "aten::upsample_nearest1d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_backward_grad_input, schema_str, "upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d_backward_grad_input::schema> create_upsample_nearest1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d_backward_grad_input::name, upsample_nearest1d_backward_grad_input::overload_name)
      .typed<upsample_nearest1d_backward_grad_input::schema>();
}

// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_nearest1d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {

    static auto op = create_upsample_nearest1d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales, grad_input);
}

// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_nearest1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {

    static auto op = create_upsample_nearest1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_backward, name, "aten::upsample_nearest1d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_backward, schema_str, "upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor")

// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d_backward::schema> create_upsample_nearest1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d_backward::name, upsample_nearest1d_backward::overload_name)
      .typed<upsample_nearest1d_backward::schema>();
}

// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
at::Tensor upsample_nearest1d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {

    static auto op = create_upsample_nearest1d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales);
}

// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
at::Tensor upsample_nearest1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {

    static auto op = create_upsample_nearest1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_backward_grad_input, name, "aten::upsample_nearest2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_backward_grad_input, schema_str, "upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_backward_grad_input::schema> create_upsample_nearest2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest2d_backward_grad_input::name, upsample_nearest2d_backward_grad_input::overload_name)
      .typed<upsample_nearest2d_backward_grad_input::schema>();
}

// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_nearest2d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {

    static auto op = create_upsample_nearest2d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}

// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_nearest2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {

    static auto op = create_upsample_nearest2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_backward, name, "aten::upsample_nearest2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_backward, schema_str, "upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor")

// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_backward::schema> create_upsample_nearest2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest2d_backward::name, upsample_nearest2d_backward::overload_name)
      .typed<upsample_nearest2d_backward::schema>();
}

// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_nearest2d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create_upsample_nearest2d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_h, scales_w);
}

// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_nearest2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create_upsample_nearest2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w);
}

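// Editorial sketch: ::redispatch() is for code already inside the dispatcher
// that wants to re-enter it for the keys below the current one. Assuming
// c10::after_autograd_keyset from c10/core/DispatchKeySet.h (the mask the
// generated autograd kernels use), a kernel might forward like this;
// the helper name below is hypothetical.
//
//   at::Tensor redispatch_below_autograd(
//       c10::DispatchKeySet ks, const at::Tensor & grad_output,
//       c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size) {
//     return at::_ops::upsample_nearest2d_backward::redispatch(
//         ks & c10::after_autograd_keyset, grad_output, output_size,
//         input_size, c10::nullopt, c10::nullopt);
//   }
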
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose3d_out, name, "aten::slow_conv_transpose3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose3d_out, schema_str, "slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_transpose3d_out::schema> create_slow_conv_transpose3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv_transpose3d_out::name, slow_conv_transpose3d_out::overload_name)
      .typed<slow_conv_transpose3d_out::schema>();
}

// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_transpose3d_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {

    static auto op = create_slow_conv_transpose3d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}

// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_transpose3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {

    static auto op = create_slow_conv_transpose3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose3d, name, "aten::slow_conv_transpose3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose3d, schema_str, "slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor")

// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_transpose3d::schema> create_slow_conv_transpose3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv_transpose3d::name, slow_conv_transpose3d::overload_name)
      .typed<slow_conv_transpose3d::schema>();
}

// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
at::Tensor slow_conv_transpose3d::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {

    static auto op = create_slow_conv_transpose3d_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
at::Tensor slow_conv_transpose3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {

    static auto op = create_slow_conv_transpose3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

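// Editorial sketch: optional tensor arguments surface at this layer as
// const c10::optional<at::Tensor> &, so a missing bias is passed as
// c10::nullopt (or a default-constructed {}). Shapes are illustrative
// assumptions only.
//
//   at::Tensor x = at::randn({1, 4, 5, 5, 5});
//   at::Tensor w = at::randn({4, 2, 3, 3, 3});
//   at::Tensor y = at::_ops::slow_conv_transpose3d::call(
//       x, w, /*kernel_size=*/{3, 3, 3}, /*bias=*/c10::nullopt,
//       /*stride=*/{1, 1, 1}, /*padding=*/{0, 0, 0},
//       /*output_padding=*/{0, 0, 0}, /*dilation=*/{1, 1, 1});
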
9345STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_forward_output, name, "aten::slow_conv3d_forward")
9346STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_forward_output, overload_name, "output")
9347STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_forward_output, schema_str, "slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)")
9348
9349// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
9350static C10_NOINLINE c10::TypedOperatorHandle<slow_conv3d_forward_output::schema> create_slow_conv3d_forward_output_typed_handle() {
9351 return c10::Dispatcher::singleton()
9352 .findSchemaOrThrow(slow_conv3d_forward_output::name, slow_conv3d_forward_output::overload_name)
9353 .typed<slow_conv3d_forward_output::schema>();
9354}
9355
9356// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
9357at::Tensor & slow_conv3d_forward_output::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
9358
9359 static auto op = create_slow_conv3d_forward_output_typed_handle();
9360 return op.call(self, weight, kernel_size, bias, stride, padding, output);
9361}
9362
9363// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
9364at::Tensor & slow_conv3d_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
9365
9366 static auto op = create_slow_conv3d_forward_output_typed_handle();
9367 return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output);
9368}
9369
9370STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_forward, name, "aten::slow_conv3d_forward")
9371STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_forward, overload_name, "")
9372STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_forward, schema_str, "slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor")
9373
9374// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
9375static C10_NOINLINE c10::TypedOperatorHandle<slow_conv3d_forward::schema> create_slow_conv3d_forward_typed_handle() {
9376 return c10::Dispatcher::singleton()
9377 .findSchemaOrThrow(slow_conv3d_forward::name, slow_conv3d_forward::overload_name)
9378 .typed<slow_conv3d_forward::schema>();
9379}
9380
9381// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
9382at::Tensor slow_conv3d_forward::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
9383
9384 static auto op = create_slow_conv3d_forward_typed_handle();
9385 return op.call(self, weight, kernel_size, bias, stride, padding);
9386}
9387
9388// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
9389at::Tensor slow_conv3d_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
9390
9391 static auto op = create_slow_conv3d_forward_typed_handle();
9392 return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
9393}
9394
9395STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(im2col_out, name, "aten::im2col")
9396STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(im2col_out, overload_name, "out")
9397STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(im2col_out, schema_str, "im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)")
9398
9399// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
9400static C10_NOINLINE c10::TypedOperatorHandle<im2col_out::schema> create_im2col_out_typed_handle() {
9401 return c10::Dispatcher::singleton()
9402 .findSchemaOrThrow(im2col_out::name, im2col_out::overload_name)
9403 .typed<im2col_out::schema>();
9404}
9405
9406// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
9407at::Tensor & im2col_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
9408
9409 static auto op = create_im2col_out_typed_handle();
9410 return op.call(self, kernel_size, dilation, padding, stride, out);
9411}
9412
9413// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
9414at::Tensor & im2col_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
9415
9416 static auto op = create_im2col_out_typed_handle();
9417 return op.redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride, out);
9418}
9419
9420STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(im2col, name, "aten::im2col")
9421STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(im2col, overload_name, "")
9422STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(im2col, schema_str, "im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor")
9423
9424// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
9425static C10_NOINLINE c10::TypedOperatorHandle<im2col::schema> create_im2col_typed_handle() {
9426 return c10::Dispatcher::singleton()
9427 .findSchemaOrThrow(im2col::name, im2col::overload_name)
9428 .typed<im2col::schema>();
9429}
9430
9431// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
9432at::Tensor im2col::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
9433
9434 static auto op = create_im2col_typed_handle();
9435 return op.call(self, kernel_size, dilation, padding, stride);
9436}
9437
9438// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
9439at::Tensor im2col::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
9440
9441 static auto op = create_im2col_typed_handle();
9442 return op.redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride);
9443}
9444
9445STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isneginf, name, "aten::isneginf")
9446STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isneginf, overload_name, "")
9447STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isneginf, schema_str, "isneginf(Tensor self) -> Tensor")
9448
9449// aten::isneginf(Tensor self) -> Tensor
9450static C10_NOINLINE c10::TypedOperatorHandle<isneginf::schema> create_isneginf_typed_handle() {
9451 return c10::Dispatcher::singleton()
9452 .findSchemaOrThrow(isneginf::name, isneginf::overload_name)
9453 .typed<isneginf::schema>();
9454}
9455
9456// aten::isneginf(Tensor self) -> Tensor
9457at::Tensor isneginf::call(const at::Tensor & self) {
9458
9459 static auto op = create_isneginf_typed_handle();
9460 return op.call(self);
9461}
9462
9463// aten::isneginf(Tensor self) -> Tensor
9464at::Tensor isneginf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
9465
9466 static auto op = create_isneginf_typed_handle();
9467 return op.redispatch(dispatchKeySet, self);
9468}
9469
9470STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isneginf_out, name, "aten::isneginf")
9471STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isneginf_out, overload_name, "out")
9472STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isneginf_out, schema_str, "isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
9473
9474// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
9475static C10_NOINLINE c10::TypedOperatorHandle<isneginf_out::schema> create_isneginf_out_typed_handle() {
9476 return c10::Dispatcher::singleton()
9477 .findSchemaOrThrow(isneginf_out::name, isneginf_out::overload_name)
9478 .typed<isneginf_out::schema>();
9479}
9480
9481// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
9482at::Tensor & isneginf_out::call(const at::Tensor & self, at::Tensor & out) {
9483
9484 static auto op = create_isneginf_out_typed_handle();
9485 return op.call(self, out);
9486}
9487
9488// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
9489at::Tensor & isneginf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
9490
9491 static auto op = create_isneginf_out_typed_handle();
9492 return op.redispatch(dispatchKeySet, self, out);
9493}
9494
9495STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_batch_dim, name, "aten::_add_batch_dim")
9496STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_batch_dim, overload_name, "")
9497STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_batch_dim, schema_str, "_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor")
9498
9499// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
9500static C10_NOINLINE c10::TypedOperatorHandle<_add_batch_dim::schema> create__add_batch_dim_typed_handle() {
9501 return c10::Dispatcher::singleton()
9502 .findSchemaOrThrow(_add_batch_dim::name, _add_batch_dim::overload_name)
9503 .typed<_add_batch_dim::schema>();
9504}
9505
9506// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
9507at::Tensor _add_batch_dim::call(const at::Tensor & self, int64_t batch_dim, int64_t level) {
9508
9509 static auto op = create__add_batch_dim_typed_handle();
9510 return op.call(self, batch_dim, level);
9511}
9512
9513// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
9514at::Tensor _add_batch_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t batch_dim, int64_t level) {
9515
9516 static auto op = create__add_batch_dim_typed_handle();
9517 return op.redispatch(dispatchKeySet, self, batch_dim, level);
9518}
9519
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_psi, name, "aten::special_psi")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_psi, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_psi, schema_str, "special_psi(Tensor self) -> Tensor")

// aten::special_psi(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_psi::schema> create_special_psi_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_psi::name, special_psi::overload_name)
        .typed<special_psi::schema>();
}

// aten::special_psi(Tensor self) -> Tensor
at::Tensor special_psi::call(const at::Tensor & self) {
    static auto op = create_special_psi_typed_handle();
    return op.call(self);
}

// aten::special_psi(Tensor self) -> Tensor
at::Tensor special_psi::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_special_psi_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_psi_out, name, "aten::special_psi")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_psi_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_psi_out, schema_str, "special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_psi_out::schema> create_special_psi_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_psi_out::name, special_psi_out::overload_name)
        .typed<special_psi_out::schema>();
}

// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_psi_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_special_psi_out_typed_handle();
    return op.call(self, out);
}

// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_psi_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_special_psi_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfcx, name, "aten::special_erfcx")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfcx, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfcx, schema_str, "special_erfcx(Tensor self) -> Tensor")

// aten::special_erfcx(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_erfcx::schema> create_special_erfcx_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_erfcx::name, special_erfcx::overload_name)
        .typed<special_erfcx::schema>();
}

// aten::special_erfcx(Tensor self) -> Tensor
at::Tensor special_erfcx::call(const at::Tensor & self) {
    static auto op = create_special_erfcx_typed_handle();
    return op.call(self);
}

// aten::special_erfcx(Tensor self) -> Tensor
at::Tensor special_erfcx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_special_erfcx_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfcx_out, name, "aten::special_erfcx")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfcx_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfcx_out, schema_str, "special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_erfcx_out::schema> create_special_erfcx_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_erfcx_out::name, special_erfcx_out::overload_name)
        .typed<special_erfcx_out::schema>();
}

// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfcx_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_special_erfcx_out_typed_handle();
    return op.call(self, out);
}

// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfcx_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_special_erfcx_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0e, name, "aten::special_i0e")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0e, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0e, schema_str, "special_i0e(Tensor self) -> Tensor")

// aten::special_i0e(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_i0e::schema> create_special_i0e_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_i0e::name, special_i0e::overload_name)
        .typed<special_i0e::schema>();
}

// aten::special_i0e(Tensor self) -> Tensor
at::Tensor special_i0e::call(const at::Tensor & self) {
    static auto op = create_special_i0e_typed_handle();
    return op.call(self);
}

// aten::special_i0e(Tensor self) -> Tensor
at::Tensor special_i0e::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_special_i0e_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0e_out, name, "aten::special_i0e")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0e_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0e_out, schema_str, "special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_i0e_out::schema> create_special_i0e_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_i0e_out::name, special_i0e_out::overload_name)
        .typed<special_i0e_out::schema>();
}

// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i0e_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_special_i0e_out_typed_handle();
    return op.call(self, out);
}

// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i0e_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_special_i0e_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

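// NOTE [out= overloads -- editorial sketch]
// The `.out` variants above take a caller-provided tensor annotated `Tensor(a!)` in
// the schema: it is written in place and the same tensor is returned by reference, so
// no new storage is allocated when `out` already has the right shape and dtype. A
// minimal sketch (comment only; shapes and values are illustrative):
//
//   at::Tensor x = at::ones({4});
//   at::Tensor out = at::empty({4});
//   at::special_i0e_out(out, x);   // fills `out`; returns a reference to it
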
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1, name, "aten::special_i1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1, schema_str, "special_i1(Tensor self) -> Tensor")

// aten::special_i1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_i1::schema> create_special_i1_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_i1::name, special_i1::overload_name)
        .typed<special_i1::schema>();
}

// aten::special_i1(Tensor self) -> Tensor
at::Tensor special_i1::call(const at::Tensor & self) {
    static auto op = create_special_i1_typed_handle();
    return op.call(self);
}

// aten::special_i1(Tensor self) -> Tensor
at::Tensor special_i1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_special_i1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1_out, name, "aten::special_i1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1_out, schema_str, "special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_i1_out::schema> create_special_i1_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_i1_out::name, special_i1_out::overload_name)
        .typed<special_i1_out::schema>();
}

// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i1_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_special_i1_out_typed_handle();
    return op.call(self, out);
}

// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_special_i1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logit, name, "aten::special_logit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logit, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logit, schema_str, "special_logit(Tensor self, float? eps=None) -> Tensor")

// aten::special_logit(Tensor self, float? eps=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_logit::schema> create_special_logit_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_logit::name, special_logit::overload_name)
        .typed<special_logit::schema>();
}

// aten::special_logit(Tensor self, float? eps=None) -> Tensor
at::Tensor special_logit::call(const at::Tensor & self, c10::optional<double> eps) {
    static auto op = create_special_logit_typed_handle();
    return op.call(self, eps);
}

// aten::special_logit(Tensor self, float? eps=None) -> Tensor
at::Tensor special_logit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps) {
    static auto op = create_special_logit_typed_handle();
    return op.redispatch(dispatchKeySet, self, eps);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logit_out, name, "aten::special_logit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logit_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logit_out, schema_str, "special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_logit_out::schema> create_special_logit_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_logit_out::name, special_logit_out::overload_name)
        .typed<special_logit_out::schema>();
}

// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_logit_out::call(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
    static auto op = create_special_logit_out_typed_handle();
    return op.call(self, eps, out);
}

// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_logit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
    static auto op = create_special_logit_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, eps, out);
}

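// NOTE [Optional scalar arguments -- editorial sketch]
// A schema argument like `float? eps=None` (special_logit above) lowers to
// c10::optional<double> in C++. Pass c10::nullopt for the schema default, or a value
// (comment-only sketch; the at::special_logit wrapper in Functions.h supplies the
// default, while the generated special_logit::call takes every argument explicitly):
//
//   at::Tensor p = at::rand({5});                       // values in [0, 1)
//   at::Tensor a = at::special_logit(p, c10::nullopt);  // no clamping
//   at::Tensor b = at::special_logit(p, 1e-6);          // clamp inputs to [eps, 1-eps]
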
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_softmax, name, "aten::special_log_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_softmax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_softmax, schema_str, "special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor")

// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_log_softmax::schema> create_special_log_softmax_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_log_softmax::name, special_log_softmax::overload_name)
        .typed<special_log_softmax::schema>();
}

// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor special_log_softmax::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_special_log_softmax_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor special_log_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_special_log_softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaincc_out, name, "aten::special_gammaincc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaincc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaincc_out, schema_str, "special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_gammaincc_out::schema> create_special_gammaincc_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_gammaincc_out::name, special_gammaincc_out::overload_name)
        .typed<special_gammaincc_out::schema>();
}

// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammaincc_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_special_gammaincc_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammaincc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_special_gammaincc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaincc, name, "aten::special_gammaincc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaincc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaincc, schema_str, "special_gammaincc(Tensor self, Tensor other) -> Tensor")

// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_gammaincc::schema> create_special_gammaincc_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_gammaincc::name, special_gammaincc::overload_name)
        .typed<special_gammaincc::schema>();
}

// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor
at::Tensor special_gammaincc::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_special_gammaincc_typed_handle();
    return op.call(self, other);
}

// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor
at::Tensor special_gammaincc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_special_gammaincc_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_multigammaln, name, "aten::special_multigammaln")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_multigammaln, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_multigammaln, schema_str, "special_multigammaln(Tensor self, int p) -> Tensor")

// aten::special_multigammaln(Tensor self, int p) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_multigammaln::schema> create_special_multigammaln_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_multigammaln::name, special_multigammaln::overload_name)
        .typed<special_multigammaln::schema>();
}

// aten::special_multigammaln(Tensor self, int p) -> Tensor
at::Tensor special_multigammaln::call(const at::Tensor & self, int64_t p) {
    static auto op = create_special_multigammaln_typed_handle();
    return op.call(self, p);
}

// aten::special_multigammaln(Tensor self, int p) -> Tensor
at::Tensor special_multigammaln::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p) {
    static auto op = create_special_multigammaln_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_multigammaln_out, name, "aten::special_multigammaln")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_multigammaln_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_multigammaln_out, schema_str, "special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_multigammaln_out::schema> create_special_multigammaln_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(special_multigammaln_out::name, special_multigammaln_out::overload_name)
        .typed<special_multigammaln_out::schema>();
}

// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_multigammaln_out::call(const at::Tensor & self, int64_t p, at::Tensor & out) {
    static auto op = create_special_multigammaln_out_typed_handle();
    return op.call(self, p, out);
}

// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_multigammaln_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) {
    static auto op = create_special_multigammaln_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft2, name, "aten::fft_fft2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft2, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft2, schema_str, "fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor")

// aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_fft2::schema> create_fft_fft2_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(fft_fft2::name, fft_fft2::overload_name)
        .typed<fft_fft2::schema>();
}

// aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_fft2::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    static auto op = create_fft_fft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_fft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    static auto op = create_fft_fft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft2_out, name, "aten::fft_fft2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft2_out, schema_str, "fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_fft2_out::schema> create_fft_fft2_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(fft_fft2_out::name, fft_fft2_out::overload_name)
        .typed<fft_fft2_out::schema>();
}

// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fft2_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    static auto op = create_fft_fft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    static auto op = create_fft_fft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

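// NOTE [Optional int-array arguments -- editorial sketch]
// `int[1]? s=None` lowers to at::OptionalIntArrayRef (an optional, non-owning view
// over int64_t), and `str? norm=None` to c10::optional<c10::string_view>. A sketch
// for fft_fft2 above (comment only; sizes are illustrative):
//
//   at::Tensor x = at::rand({8, 8});
//   // default signal sizes, default dims {-2, -1}, "ortho" normalization:
//   at::Tensor X = at::fft_fft2(x, c10::nullopt, {-2, -1}, "ortho");
//   // zero-pad/crop each transformed dim to length 16:
//   std::vector<int64_t> s = {16, 16};
//   at::Tensor Y = at::fft_fft2(x, s, {-2, -1}, c10::nullopt);
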
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftn, name, "aten::fft_fftn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftn, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftn, schema_str, "fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor")

// aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftn::schema> create_fft_fftn_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(fft_fftn::name, fft_fftn::overload_name)
        .typed<fft_fftn::schema>();
}

// aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_fftn::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    static auto op = create_fft_fftn_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_fftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    static auto op = create_fft_fftn_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftn_out, name, "aten::fft_fftn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftn_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftn_out, schema_str, "fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftn_out::schema> create_fft_fftn_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(fft_fftn_out::name, fft_fftn_out::overload_name)
        .typed<fft_fftn_out::schema>();
}

// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fftn_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    static auto op = create_fft_fftn_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    static auto op = create_fft_fftn_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftshift, name, "aten::fft_fftshift")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftshift, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftshift, schema_str, "fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor")

// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftshift::schema> create_fft_fftshift_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(fft_fftshift::name, fft_fftshift::overload_name)
        .typed<fft_fftshift::schema>();
}

// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
at::Tensor fft_fftshift::call(const at::Tensor & self, at::OptionalIntArrayRef dim) {
    static auto op = create_fft_fftshift_typed_handle();
    return op.call(self, dim);
}

// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
at::Tensor fft_fftshift::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim) {
    static auto op = create_fft_fftshift_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor, name, "aten::linalg_lu_factor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor, schema_str, "linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)")

// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_factor::schema> create_linalg_lu_factor_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(linalg_lu_factor::name, linalg_lu_factor::overload_name)
        .typed<linalg_lu_factor::schema>();
}

// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor::call(const at::Tensor & A, bool pivot) {
    static auto op = create_linalg_lu_factor_typed_handle();
    return op.call(A, pivot);
}

// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot) {
    static auto op = create_linalg_lu_factor_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_out, name, "aten::linalg_lu_factor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_out, schema_str, "linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)")

// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_factor_out::schema> create_linalg_lu_factor_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(linalg_lu_factor_out::name, linalg_lu_factor_out::overload_name)
        .typed<linalg_lu_factor_out::schema>();
}

// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out::call(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
    static auto op = create_linalg_lu_factor_out_typed_handle();
    return op.call(A, pivot, LU, pivots);
}

// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
    static auto op = create_linalg_lu_factor_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot, LU, pivots);
}

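// NOTE [Tuple returns -- editorial sketch]
// Operators with multiple outputs return ::std::tuple, which unpacks naturally with
// structured bindings. A comment-only sketch for linalg_lu_factor above, using the
// at::linalg_lu_factor wrapper (which defaults pivot to true):
//
//   at::Tensor A = at::rand({3, 3});
//   auto [LU, pivots] = at::linalg_lu_factor(A);
//   // LU holds the packed L/U factors, pivots the row-interchange indices.
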
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_solve, name, "aten::linalg_lu_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_solve, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_solve, schema_str, "linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor")

// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_solve::schema> create_linalg_lu_solve_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(linalg_lu_solve::name, linalg_lu_solve::overload_name)
        .typed<linalg_lu_solve::schema>();
}

// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
at::Tensor linalg_lu_solve::call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
    static auto op = create_linalg_lu_solve_typed_handle();
    return op.call(LU, pivots, B, left, adjoint);
}

// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
at::Tensor linalg_lu_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
    static auto op = create_linalg_lu_solve_typed_handle();
    return op.redispatch(dispatchKeySet, LU, pivots, B, left, adjoint);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_solve_out, name, "aten::linalg_lu_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_solve_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_solve_out, schema_str, "linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_solve_out::schema> create_linalg_lu_solve_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(linalg_lu_solve_out::name, linalg_lu_solve_out::overload_name)
        .typed<linalg_lu_solve_out::schema>();
}

// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_lu_solve_out::call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
    static auto op = create_linalg_lu_solve_out_typed_handle();
    return op.call(LU, pivots, B, left, adjoint, out);
}

// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_lu_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
    static auto op = create_linalg_lu_solve_out_typed_handle();
    return op.redispatch(dispatchKeySet, LU, pivots, B, left, adjoint, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_det, name, "aten::linalg_det")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_det, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_det, schema_str, "linalg_det(Tensor A) -> Tensor")

// aten::linalg_det(Tensor A) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_det::schema> create_linalg_det_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(linalg_det::name, linalg_det::overload_name)
        .typed<linalg_det::schema>();
}

// aten::linalg_det(Tensor A) -> Tensor
at::Tensor linalg_det::call(const at::Tensor & A) {
    static auto op = create_linalg_det_typed_handle();
    return op.call(A);
}

// aten::linalg_det(Tensor A) -> Tensor
at::Tensor linalg_det::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
    static auto op = create_linalg_det_typed_handle();
    return op.redispatch(dispatchKeySet, A);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_det_out, name, "aten::linalg_det")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_det_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_det_out, schema_str, "linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_det_out::schema> create_linalg_det_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(linalg_det_out::name, linalg_det_out::overload_name)
        .typed<linalg_det_out::schema>();
}

// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_det_out::call(const at::Tensor & A, at::Tensor & out) {
    static auto op = create_linalg_det_out_typed_handle();
    return op.call(A, out);
}

// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_det_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
    static auto op = create_linalg_det_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_slogdet, name, "aten::_linalg_slogdet")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_slogdet, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_slogdet, schema_str, "_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)")

// aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_slogdet::schema> create__linalg_slogdet_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(_linalg_slogdet::name, _linalg_slogdet::overload_name)
        .typed<_linalg_slogdet::schema>();
}

// aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet::call(const at::Tensor & A) {
    static auto op = create__linalg_slogdet_typed_handle();
    return op.call(A);
}

// aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
    static auto op = create__linalg_slogdet_typed_handle();
    return op.redispatch(dispatchKeySet, A);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_slogdet_sign, name, "aten::_linalg_slogdet")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_slogdet_sign, overload_name, "sign")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_slogdet_sign, schema_str, "_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)")

// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_slogdet_sign::schema> create__linalg_slogdet_sign_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(_linalg_slogdet_sign::name, _linalg_slogdet_sign::overload_name)
        .typed<_linalg_slogdet_sign::schema>();
}

// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_sign::call(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
    static auto op = create__linalg_slogdet_sign_typed_handle();
    return op.call(A, sign, logabsdet, LU, pivots);
}

// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_sign::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
    static auto op = create__linalg_slogdet_sign_typed_handle();
    return op.redispatch(dispatchKeySet, A, sign, logabsdet, LU, pivots);
}

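// NOTE [Named out overloads -- editorial sketch]
// When an operator has several outputs, its out variant is named after the first
// output rather than "out": _linalg_slogdet's out overload above registers as
// "aten::_linalg_slogdet.sign" and takes four destination tensors (sign, logabsdet,
// LU, pivots), each annotated as mutated (a!..d!) and returned by reference in the
// same order. Typical user code goes through the public counterpart instead
// (comment-only sketch):
//
//   at::Tensor A = at::rand({3, 3});
//   auto [sgn, logabsdet] = at::linalg_slogdet(A);
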
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv, name, "aten::linalg_inv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv, schema_str, "linalg_inv(Tensor A) -> Tensor")

// aten::linalg_inv(Tensor A) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_inv::schema> create_linalg_inv_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(linalg_inv::name, linalg_inv::overload_name)
        .typed<linalg_inv::schema>();
}

// aten::linalg_inv(Tensor A) -> Tensor
at::Tensor linalg_inv::call(const at::Tensor & A) {
    static auto op = create_linalg_inv_typed_handle();
    return op.call(A);
}

// aten::linalg_inv(Tensor A) -> Tensor
at::Tensor linalg_inv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
    static auto op = create_linalg_inv_typed_handle();
    return op.redispatch(dispatchKeySet, A);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_out, name, "aten::linalg_inv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_out, schema_str, "linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_inv_out::schema> create_linalg_inv_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(linalg_inv_out::name, linalg_inv_out::overload_name)
        .typed<linalg_inv_out::schema>();
}

// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_inv_out::call(const at::Tensor & A, at::Tensor & out) {
    static auto op = create_linalg_inv_out_typed_handle();
    return op.call(A, out);
}

// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_inv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
    static auto op = create_linalg_inv_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(outer, name, "aten::outer")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(outer, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(outer, schema_str, "outer(Tensor self, Tensor vec2) -> Tensor")

// aten::outer(Tensor self, Tensor vec2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<outer::schema> create_outer_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(outer::name, outer::overload_name)
        .typed<outer::schema>();
}

// aten::outer(Tensor self, Tensor vec2) -> Tensor
at::Tensor outer::call(const at::Tensor & self, const at::Tensor & vec2) {
    static auto op = create_outer_typed_handle();
    return op.call(self, vec2);
}

// aten::outer(Tensor self, Tensor vec2) -> Tensor
at::Tensor outer::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2) {
    static auto op = create_outer_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(outer_out, name, "aten::outer")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(outer_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(outer_out, schema_str, "outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)")

// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<outer_out::schema> create_outer_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(outer_out::name, outer_out::overload_name)
        .typed<outer_out::schema>();
}

// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & outer_out::call(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    static auto op = create_outer_out_typed_handle();
    return op.call(self, vec2, out);
}

// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & outer_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    static auto op = create_outer_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec2, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ger, name, "aten::ger")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ger, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ger, schema_str, "ger(Tensor self, Tensor vec2) -> Tensor")

// aten::ger(Tensor self, Tensor vec2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ger::schema> create_ger_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(ger::name, ger::overload_name)
        .typed<ger::schema>();
}

// aten::ger(Tensor self, Tensor vec2) -> Tensor
at::Tensor ger::call(const at::Tensor & self, const at::Tensor & vec2) {
    static auto op = create_ger_typed_handle();
    return op.call(self, vec2);
}

// aten::ger(Tensor self, Tensor vec2) -> Tensor
at::Tensor ger::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2) {
    static auto op = create_ger_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ger_out, name, "aten::ger")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ger_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ger_out, schema_str, "ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ger_out::schema> create_ger_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(ger_out::name, ger_out::overload_name)
        .typed<ger_out::schema>();
}

// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ger_out::call(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    static auto op = create_ger_out_typed_handle();
    return op.call(self, vec2, out);
}

// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ger_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    static auto op = create_ger_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec2, out);
}

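// NOTE [Aliases -- editorial sketch]
// ger above is a deprecated alias of outer: both compute the outer product of two
// 1-D tensors, and PyTorch's documentation steers new code toward outer/outer_out.
// A comment-only sketch (values illustrative):
//
//   at::Tensor u = at::arange(3).to(at::kFloat);   // [0, 1, 2]
//   at::Tensor v = at::ones({2});
//   at::Tensor M = at::outer(u, v);                // shape {3, 2}
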
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_svd, name, "aten::_linalg_svd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_svd, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_svd, schema_str, "_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)")

// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_svd::schema> create__linalg_svd_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(_linalg_svd::name, _linalg_svd::overload_name)
        .typed<_linalg_svd::schema>();
}

// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd::call(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
    static auto op = create__linalg_svd_typed_handle();
    return op.call(A, full_matrices, compute_uv, driver);
}

// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
    static auto op = create__linalg_svd_typed_handle();
    return op.redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_svd_U, name, "aten::_linalg_svd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_svd_U, overload_name, "U")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_svd_U, schema_str, "_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)")

// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_svd_U::schema> create__linalg_svd_U_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(_linalg_svd_U::name, _linalg_svd_U::overload_name)
        .typed<_linalg_svd_U::schema>();
}

// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_U::call(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    static auto op = create__linalg_svd_U_typed_handle();
    return op.call(A, full_matrices, compute_uv, driver, U, S, Vh);
}

// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_U::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    static auto op = create__linalg_svd_U_typed_handle();
    return op.redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver, U, S, Vh);
}

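// NOTE [Schema defaults -- editorial sketch]
// Defaults such as full_matrices=False or driver=None live only in the schema strings
// above; the generated call()/redispatch() signatures take every argument explicitly.
// The convenience wrappers in ATen's generated Functions.h re-attach the defaults, so
// a caller can write (comment-only sketch):
//
//   at::Tensor A = at::rand({4, 3});
//   auto [U, S, Vh] = at::_linalg_svd(A);  // full_matrices=false, compute_uv=true,
//                                          // driver=c10::nullopt
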
10420STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_solve_ex, name, "aten::_linalg_solve_ex")
10421STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_solve_ex, overload_name, "")
10422STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_solve_ex, schema_str, "_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)")
10423
10424// aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
10425static C10_NOINLINE c10::TypedOperatorHandle<_linalg_solve_ex::schema> create__linalg_solve_ex_typed_handle() {
10426 return c10::Dispatcher::singleton()
10427 .findSchemaOrThrow(_linalg_solve_ex::name, _linalg_solve_ex::overload_name)
10428 .typed<_linalg_solve_ex::schema>();
10429}
10430
10431// aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
10432::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex::call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
10433
10434 static auto op = create__linalg_solve_ex_typed_handle();
10435 return op.call(A, B, left, check_errors);
10436}
10437
10438// aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
10439::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
10440
10441 static auto op = create__linalg_solve_ex_typed_handle();
10442 return op.redispatch(dispatchKeySet, A, B, left, check_errors);
10443}
10444
10445STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_solve_ex_result, name, "aten::_linalg_solve_ex")
10446STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_solve_ex_result, overload_name, "result")
10447STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_solve_ex_result, schema_str, "_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)")
10448
10449// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
10450static C10_NOINLINE c10::TypedOperatorHandle<_linalg_solve_ex_result::schema> create__linalg_solve_ex_result_typed_handle() {
10451 return c10::Dispatcher::singleton()
10452 .findSchemaOrThrow(_linalg_solve_ex_result::name, _linalg_solve_ex_result::overload_name)
10453 .typed<_linalg_solve_ex_result::schema>();
10454}
10455
10456// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
10457::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_result::call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
10458
10459 static auto op = create__linalg_solve_ex_result_typed_handle();
10460 return op.call(A, B, left, check_errors, result, LU, pivots, info);
10461}
10462
10463// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
10464::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_result::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
10465
10466 static auto op = create__linalg_solve_ex_result_typed_handle();
10467 return op.redispatch(dispatchKeySet, A, B, left, check_errors, result, LU, pivots, info);
10468}
10469
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_qr, name, "aten::linalg_qr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_qr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_qr, schema_str, "linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)")

// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_qr::schema> create_linalg_qr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_qr::name, linalg_qr::overload_name)
      .typed<linalg_qr::schema>();
}

// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
::std::tuple<at::Tensor,at::Tensor> linalg_qr::call(const at::Tensor & A, c10::string_view mode) {

    static auto op = create_linalg_qr_typed_handle();
    return op.call(A, mode);
}

// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
::std::tuple<at::Tensor,at::Tensor> linalg_qr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode) {

    static auto op = create_linalg_qr_typed_handle();
    return op.redispatch(dispatchKeySet, A, mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_qr_out, name, "aten::linalg_qr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_qr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_qr_out, schema_str, "linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)")

// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_qr_out::schema> create_linalg_qr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_qr_out::name, linalg_qr_out::overload_name)
      .typed<linalg_qr_out::schema>();
}

// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out::call(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {

    static auto op = create_linalg_qr_out_typed_handle();
    return op.call(A, mode, Q, R);
}

// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {

    static auto op = create_linalg_qr_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, mode, Q, R);
}

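// [Editorial sketch, not part of the generated output] A minimal example of how
// user code reaches the linalg_qr wrappers above, assuming the usual at::
// convenience functions declared in ATen/ops/linalg_qr.h (out variants take the
// output tensors first in the C++ API):
//
//   at::Tensor A = at::randn({4, 4});
//   auto [Q, R] = at::linalg_qr(A, "reduced");   // routes through linalg_qr::call
//   at::Tensor Qo = at::empty({0}), Ro = at::empty({0});
//   at::linalg_qr_out(Qo, Ro, A, "reduced");     // routes through linalg_qr_out::call
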
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nested_to_padded_tensor, name, "aten::nested_to_padded_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nested_to_padded_tensor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nested_to_padded_tensor, schema_str, "nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor")

// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nested_to_padded_tensor::schema> create_nested_to_padded_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nested_to_padded_tensor::name, nested_to_padded_tensor::overload_name)
      .typed<nested_to_padded_tensor::schema>();
}

// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
at::Tensor nested_to_padded_tensor::call(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {

    static auto op = create_nested_to_padded_tensor_typed_handle();
    return op.call(self, padding, output_size);
}

// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
at::Tensor nested_to_padded_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {

    static auto op = create_nested_to_padded_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, output_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_warn_in_autograd, name, "aten::_test_warn_in_autograd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_warn_in_autograd, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_warn_in_autograd, schema_str, "_test_warn_in_autograd(Tensor self) -> Tensor")

// aten::_test_warn_in_autograd(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_warn_in_autograd::schema> create__test_warn_in_autograd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_warn_in_autograd::name, _test_warn_in_autograd::overload_name)
      .typed<_test_warn_in_autograd::schema>();
}

// aten::_test_warn_in_autograd(Tensor self) -> Tensor
at::Tensor _test_warn_in_autograd::call(const at::Tensor & self) {

    static auto op = create__test_warn_in_autograd_typed_handle();
    return op.call(self);
}

// aten::_test_warn_in_autograd(Tensor self) -> Tensor
at::Tensor _test_warn_in_autograd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__test_warn_in_autograd_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view, name, "aten::_test_autograd_multiple_dispatch_view")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view, schema_str, "_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)")

// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_view::schema> create__test_autograd_multiple_dispatch_view_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_view::name, _test_autograd_multiple_dispatch_view::overload_name)
      .typed<_test_autograd_multiple_dispatch_view::schema>();
}

// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
at::Tensor _test_autograd_multiple_dispatch_view::call(const at::Tensor & self) {

    static auto op = create__test_autograd_multiple_dispatch_view_typed_handle();
    return op.call(self);
}

// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
at::Tensor _test_autograd_multiple_dispatch_view::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__test_autograd_multiple_dispatch_view_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_copy, name, "aten::diagonal_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_copy, schema_str, "diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor")

// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_copy::schema> create_diagonal_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_copy::name, diagonal_copy::overload_name)
      .typed<diagonal_copy::schema>();
}

// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
at::Tensor diagonal_copy::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {

    static auto op = create_diagonal_copy_typed_handle();
    return op.call(self, offset, dim1, dim2);
}

// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
at::Tensor diagonal_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {

    static auto op = create_diagonal_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, offset, dim1, dim2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute_copy, name, "aten::permute_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute_copy, schema_str, "permute_copy(Tensor self, int[] dims) -> Tensor")

// aten::permute_copy(Tensor self, int[] dims) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<permute_copy::schema> create_permute_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(permute_copy::name, permute_copy::overload_name)
      .typed<permute_copy::schema>();
}

// aten::permute_copy(Tensor self, int[] dims) -> Tensor
at::Tensor permute_copy::call(const at::Tensor & self, at::IntArrayRef dims) {

    static auto op = create_permute_copy_typed_handle();
    return op.call(self, dims);
}

// aten::permute_copy(Tensor self, int[] dims) -> Tensor
at::Tensor permute_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {

    static auto op = create_permute_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_copy_int, name, "aten::select_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_copy_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_copy_int, schema_str, "select_copy.int(Tensor self, int dim, SymInt index) -> Tensor")

// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<select_copy_int::schema> create_select_copy_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_copy_int::name, select_copy_int::overload_name)
      .typed<select_copy_int::schema>();
}

// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
at::Tensor select_copy_int::call(const at::Tensor & self, int64_t dim, c10::SymInt index) {

    static auto op = create_select_copy_int_typed_handle();
    return op.call(self, dim, index);
}

// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
at::Tensor select_copy_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index) {

    static auto op = create_select_copy_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_copy_Tensor, name, "aten::slice_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_copy_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_copy_Tensor, schema_str, "slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor")

// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slice_copy_Tensor::schema> create_slice_copy_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_copy_Tensor::name, slice_copy_Tensor::overload_name)
      .typed<slice_copy_Tensor::schema>();
}

// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
at::Tensor slice_copy_Tensor::call(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {

    static auto op = create_slice_copy_Tensor_typed_handle();
    return op.call(self, dim, start, end, step);
}

// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
at::Tensor slice_copy_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {

    static auto op = create_slice_copy_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, end, step);
}

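// [Editorial sketch, not part of the generated output] The "SymInt?" arguments of
// slice_copy.Tensor surface as c10::optional<c10::SymInt> in C++; passing
// c10::nullopt selects the schema defaults (start=None, end=None). For example,
// assuming `self` is some at::Tensor, a full-range copy along dim 0 is:
//
//   at::Tensor s = slice_copy_Tensor::call(
//       self, /*dim=*/0, /*start=*/c10::nullopt, /*end=*/c10::nullopt,
//       /*step=*/c10::SymInt(1));
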
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes_copy, name, "aten::split_with_sizes_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes_copy, schema_str, "split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]")

// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<split_with_sizes_copy::schema> create_split_with_sizes_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_with_sizes_copy::name, split_with_sizes_copy::overload_name)
      .typed<split_with_sizes_copy::schema>();
}

// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
::std::vector<at::Tensor> split_with_sizes_copy::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {

    static auto op = create_split_with_sizes_copy_typed_handle();
    return op.call(self, split_sizes, dim);
}

// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
::std::vector<at::Tensor> split_with_sizes_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {

    static auto op = create_split_with_sizes_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_sizes, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_copy, name, "aten::t_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_copy, schema_str, "t_copy(Tensor self) -> Tensor")

// aten::t_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<t_copy::schema> create_t_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(t_copy::name, t_copy::overload_name)
      .typed<t_copy::schema>();
}

// aten::t_copy(Tensor self) -> Tensor
at::Tensor t_copy::call(const at::Tensor & self) {

    static auto op = create_t_copy_typed_handle();
    return op.call(self);
}

// aten::t_copy(Tensor self) -> Tensor
at::Tensor t_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_t_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col_indices_copy, name, "aten::col_indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col_indices_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col_indices_copy, schema_str, "col_indices_copy(Tensor self) -> Tensor")

// aten::col_indices_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<col_indices_copy::schema> create_col_indices_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(col_indices_copy::name, col_indices_copy::overload_name)
      .typed<col_indices_copy::schema>();
}

// aten::col_indices_copy(Tensor self) -> Tensor
at::Tensor col_indices_copy::call(const at::Tensor & self) {

    static auto op = create_col_indices_copy_typed_handle();
    return op.call(self);
}

// aten::col_indices_copy(Tensor self) -> Tensor
at::Tensor col_indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_col_indices_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_copy_int, name, "aten::unbind_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_copy_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_copy_int, schema_str, "unbind_copy.int(Tensor self, int dim=0) -> Tensor[]")

// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<unbind_copy_int::schema> create_unbind_copy_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unbind_copy_int::name, unbind_copy_int::overload_name)
      .typed<unbind_copy_int::schema>();
}

// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unbind_copy_int::call(const at::Tensor & self, int64_t dim) {

    static auto op = create_unbind_copy_int_typed_handle();
    return op.call(self, dim);
}

// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unbind_copy_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create_unbind_copy_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_copy_int_out, name, "aten::unbind_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_copy_int_out, overload_name, "int_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_copy_int_out, schema_str, "unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()")

// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<unbind_copy_int_out::schema> create_unbind_copy_int_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unbind_copy_int_out::name, unbind_copy_int_out::overload_name)
      .typed<unbind_copy_int_out::schema>();
}

// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
void unbind_copy_int_out::call(const at::Tensor & self, int64_t dim, at::TensorList out) {

    static auto op = create_unbind_copy_int_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
void unbind_copy_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::TensorList out) {

    static auto op = create_unbind_copy_int_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes_copy_out, name, "aten::split_with_sizes_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes_copy_out, schema_str, "split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()")

// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<split_with_sizes_copy_out::schema> create_split_with_sizes_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_with_sizes_copy_out::name, split_with_sizes_copy_out::overload_name)
      .typed<split_with_sizes_copy_out::schema>();
}

// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
void split_with_sizes_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {

    static auto op = create_split_with_sizes_copy_out_typed_handle();
    return op.call(self, split_sizes, dim, out);
}

// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
void split_with_sizes_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {

    static auto op = create_split_with_sizes_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_sizes, dim, out);
}

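// [Editorial sketch, not part of the generated output] Unlike the Tensor(a!)-returning
// out overloads elsewhere in this file, the int_out/out variants above return void and
// write into a caller-provided at::TensorList, so each output must be allocated up
// front, one tensor per split. For example, assuming `self` is a 1-D tensor of length 10:
//
//   std::vector<at::Tensor> outs;
//   for (int64_t n : {3, 3, 4}) outs.push_back(at::empty({n}, self.options()));
//   std::vector<c10::SymInt> sizes = {3, 3, 4};
//   split_with_sizes_copy_out::call(self, sizes, /*dim=*/0, outs);
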
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias_copy, name, "aten::alias_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias_copy, schema_str, "alias_copy(Tensor self) -> Tensor")

// aten::alias_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<alias_copy::schema> create_alias_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(alias_copy::name, alias_copy::overload_name)
      .typed<alias_copy::schema>();
}

// aten::alias_copy(Tensor self) -> Tensor
at::Tensor alias_copy::call(const at::Tensor & self) {

    static auto op = create_alias_copy_typed_handle();
    return op.call(self);
}

// aten::alias_copy(Tensor self) -> Tensor
at::Tensor alias_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_alias_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_attention_math, name, "aten::_scaled_dot_product_attention_math")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_attention_math, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_attention_math, schema_str, "_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)")

// aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_attention_math::schema> create__scaled_dot_product_attention_math_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_attention_math::name, _scaled_dot_product_attention_math::overload_name)
      .typed<_scaled_dot_product_attention_math::schema>();
}

// aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {

    static auto op = create__scaled_dot_product_attention_math_typed_handle();
    return op.call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
}

// aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {

    static auto op = create__scaled_dot_product_attention_math_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
}

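// [Editorial sketch, not part of the generated output] The math variant is the
// composite (non-fused) path for scaled dot-product attention and returns both the
// attention output and the post-softmax weights. Assuming q, k, v are tensors of
// shape [batch, heads, seq_len, head_dim]:
//
//   auto [attn, weights] = _scaled_dot_product_attention_math::call(
//       q, k, v, /*attn_mask=*/c10::nullopt, /*dropout_p=*/0.0,
//       /*is_causal=*/false, /*dropout_mask=*/c10::nullopt);
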
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_flash_attention_backward, name, "aten::_scaled_dot_product_flash_attention_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_flash_attention_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_flash_attention_backward, schema_str, "_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)")

// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_flash_attention_backward::schema> create__scaled_dot_product_flash_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_flash_attention_backward::name, _scaled_dot_product_flash_attention_backward::overload_name)
      .typed<_scaled_dot_product_flash_attention_backward::schema>();
}

// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {

    static auto op = create__scaled_dot_product_flash_attention_backward_typed_handle();
    return op.call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
}

// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {

    static auto op = create__scaled_dot_product_flash_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_scaled_dot_attention, name, "aten::_triton_scaled_dot_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_scaled_dot_attention, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_scaled_dot_attention, schema_str, "_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor")

// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_triton_scaled_dot_attention::schema> create__triton_scaled_dot_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_triton_scaled_dot_attention::name, _triton_scaled_dot_attention::overload_name)
      .typed<_triton_scaled_dot_attention::schema>();
}

// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
at::Tensor _triton_scaled_dot_attention::call(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {

    static auto op = create__triton_scaled_dot_attention_typed_handle();
    return op.call(q, k, v, dropout_p);
}

// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
at::Tensor _triton_scaled_dot_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {

    static auto op = create__triton_scaled_dot_attention_typed_handle();
    return op.redispatch(dispatchKeySet, q, k, v, dropout_p);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t, name, "aten::special_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t, schema_str, "special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor")

// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t::schema> create_special_chebyshev_polynomial_t_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_t::name, special_chebyshev_polynomial_t::overload_name)
      .typed<special_chebyshev_polynomial_t::schema>();
}

// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_t::call(const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_t_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_t::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_t_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_x_scalar, name, "aten::special_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_x_scalar, overload_name, "x_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_x_scalar, schema_str, "special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor")

// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_x_scalar::schema> create_special_chebyshev_polynomial_t_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_t_x_scalar::name, special_chebyshev_polynomial_t_x_scalar::overload_name)
      .typed<special_chebyshev_polynomial_t_x_scalar::schema>();
}

// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_t_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_t_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_t_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_t_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_n_scalar, name, "aten::special_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_n_scalar, overload_name, "n_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_n_scalar, schema_str, "special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor")

// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_n_scalar::schema> create_special_chebyshev_polynomial_t_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_t_n_scalar::name, special_chebyshev_polynomial_t_n_scalar::overload_name)
      .typed<special_chebyshev_polynomial_t_n_scalar::schema>();
}

// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_t_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_chebyshev_polynomial_t_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_t_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_chebyshev_polynomial_t_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_out, name, "aten::special_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_out, schema_str, "special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_out::schema> create_special_chebyshev_polynomial_t_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_t_out::name, special_chebyshev_polynomial_t_out::overload_name)
      .typed<special_chebyshev_polynomial_t_out::schema>();
}

// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_t_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_t_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_x_scalar_out, name, "aten::special_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_x_scalar_out, overload_name, "x_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_x_scalar_out, schema_str, "special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_x_scalar_out::schema> create_special_chebyshev_polynomial_t_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_t_x_scalar_out::name, special_chebyshev_polynomial_t_x_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_t_x_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_t_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_t_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_n_scalar_out, name, "aten::special_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_n_scalar_out, overload_name, "n_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_t_n_scalar_out, schema_str, "special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_n_scalar_out::schema> create_special_chebyshev_polynomial_t_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_t_n_scalar_out::name, special_chebyshev_polynomial_t_n_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_t_n_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_t_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_t_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

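// [Editorial note, not part of the generated output] The x_scalar/n_scalar entries
// and their *_out counterparts above are all overloads of the single schema name
// "aten::special_chebyshev_polynomial_t"; they differ only in overload_name, which
// is exactly what findSchemaOrThrow uses to disambiguate them in the dispatcher.
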
11095STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_scaled_modified_bessel_k1, name, "aten::special_scaled_modified_bessel_k1")
11096STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_scaled_modified_bessel_k1, overload_name, "")
11097STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_scaled_modified_bessel_k1, schema_str, "special_scaled_modified_bessel_k1(Tensor x) -> Tensor")
11098
11099// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor
11100static C10_NOINLINE c10::TypedOperatorHandle<special_scaled_modified_bessel_k1::schema> create_special_scaled_modified_bessel_k1_typed_handle() {
11101 return c10::Dispatcher::singleton()
11102 .findSchemaOrThrow(special_scaled_modified_bessel_k1::name, special_scaled_modified_bessel_k1::overload_name)
11103 .typed<special_scaled_modified_bessel_k1::schema>();
11104}
11105
11106// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor
11107at::Tensor special_scaled_modified_bessel_k1::call(const at::Tensor & x) {
11108
11109 static auto op = create_special_scaled_modified_bessel_k1_typed_handle();
11110 return op.call(x);
11111}
11112
11113// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor
11114at::Tensor special_scaled_modified_bessel_k1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
11115
11116 static auto op = create_special_scaled_modified_bessel_k1_typed_handle();
11117 return op.redispatch(dispatchKeySet, x);
11118}
11119
11120STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_scaled_modified_bessel_k1_out, name, "aten::special_scaled_modified_bessel_k1")
11121STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_scaled_modified_bessel_k1_out, overload_name, "out")
11122STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_scaled_modified_bessel_k1_out, schema_str, "special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)")
11123
11124// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
11125static C10_NOINLINE c10::TypedOperatorHandle<special_scaled_modified_bessel_k1_out::schema> create_special_scaled_modified_bessel_k1_out_typed_handle() {
11126 return c10::Dispatcher::singleton()
11127 .findSchemaOrThrow(special_scaled_modified_bessel_k1_out::name, special_scaled_modified_bessel_k1_out::overload_name)
11128 .typed<special_scaled_modified_bessel_k1_out::schema>();
11129}
11130
11131// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
11132at::Tensor & special_scaled_modified_bessel_k1_out::call(const at::Tensor & x, at::Tensor & out) {
11133
11134 static auto op = create_special_scaled_modified_bessel_k1_out_typed_handle();
11135 return op.call(x, out);
11136}
11137
11138// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
11139at::Tensor & special_scaled_modified_bessel_k1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
11140
11141 static auto op = create_special_scaled_modified_bessel_k1_out_typed_handle();
11142 return op.redispatch(dispatchKeySet, x, out);
11143}
11144
11145STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foobar, name, "aten::_foobar")
11146STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foobar, overload_name, "")
11147STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foobar, schema_str, "_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor")
11148
11149// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
11150static C10_NOINLINE c10::TypedOperatorHandle<_foobar::schema> create__foobar_typed_handle() {
11151 return c10::Dispatcher::singleton()
11152 .findSchemaOrThrow(_foobar::name, _foobar::overload_name)
11153 .typed<_foobar::schema>();
11154}
11155
11156// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
11157at::Tensor _foobar::call(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
11158
11159 static auto op = create__foobar_typed_handle();
11160 return op.call(self, arg1, arg2, arg3);
11161}
11162
11163// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
11164at::Tensor _foobar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
11165
11166 static auto op = create__foobar_typed_handle();
11167 return op.redispatch(dispatchKeySet, self, arg1, arg2, arg3);
11168}
11169
11170STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_scale_out, name, "aten::_masked_scale")
11171STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_scale_out, overload_name, "out")
11172STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_scale_out, schema_str, "_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)")
11173
11174// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
11175static C10_NOINLINE c10::TypedOperatorHandle<_masked_scale_out::schema> create__masked_scale_out_typed_handle() {
11176 return c10::Dispatcher::singleton()
11177 .findSchemaOrThrow(_masked_scale_out::name, _masked_scale_out::overload_name)
11178 .typed<_masked_scale_out::schema>();
11179}
11180
11181// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
11182at::Tensor & _masked_scale_out::call(const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
11183
11184 static auto op = create__masked_scale_out_typed_handle();
11185 return op.call(self, mask, scale, out);
11186}
11187
11188// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
11189at::Tensor & _masked_scale_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
11190
11191 static auto op = create__masked_scale_out_typed_handle();
11192 return op.redispatch(dispatchKeySet, self, mask, scale, out);
11193}
11194
11195STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(constant_pad_nd_out, name, "aten::constant_pad_nd")
11196STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(constant_pad_nd_out, overload_name, "out")
11197STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(constant_pad_nd_out, schema_str, "constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)")
11198
11199// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
11200static C10_NOINLINE c10::TypedOperatorHandle<constant_pad_nd_out::schema> create_constant_pad_nd_out_typed_handle() {
11201 return c10::Dispatcher::singleton()
11202 .findSchemaOrThrow(constant_pad_nd_out::name, constant_pad_nd_out::overload_name)
11203 .typed<constant_pad_nd_out::schema>();
11204}
11205
11206// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
11207at::Tensor & constant_pad_nd_out::call(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
11208
11209 static auto op = create_constant_pad_nd_out_typed_handle();
11210 return op.call(self, pad, value, out);
11211}
11212
11213// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
11214at::Tensor & constant_pad_nd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
11215
11216 static auto op = create_constant_pad_nd_out_typed_handle();
11217 return op.redispatch(dispatchKeySet, self, pad, value, out);
11218}
11219
11220STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_out, name, "aten::convolution_backward")
11221STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_out, overload_name, "out")
11222STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_out, schema_str, "convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
11223
11224// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
11225static C10_NOINLINE c10::TypedOperatorHandle<convolution_backward_out::schema> create_convolution_backward_out_typed_handle() {
11226 return c10::Dispatcher::singleton()
11227 .findSchemaOrThrow(convolution_backward_out::name, convolution_backward_out::overload_name)
11228 .typed<convolution_backward_out::schema>();
11229}
11230
11231// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
11232::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
11233
11234 static auto op = create_convolution_backward_out_typed_handle();
11235 return op.call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
11236}
11237
11238// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
11239::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
11240
11241 static auto op = create_convolution_backward_out_typed_handle();
11242 return op.redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
11243}
11244
11245STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_overrideable_out, name, "aten::convolution_overrideable")
11246STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_overrideable_out, overload_name, "out")
11247STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_overrideable_out, schema_str, "convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)")
11248
11249// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
11250static C10_NOINLINE c10::TypedOperatorHandle<convolution_overrideable_out::schema> create_convolution_overrideable_out_typed_handle() {
11251 return c10::Dispatcher::singleton()
11252 .findSchemaOrThrow(convolution_overrideable_out::name, convolution_overrideable_out::overload_name)
11253 .typed<convolution_overrideable_out::schema>();
11254}
11255
11256// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
11257at::Tensor & convolution_overrideable_out::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
11258
11259 static auto op = create_convolution_overrideable_out_typed_handle();
11260 return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
11261}
11262
11263// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
11264at::Tensor & convolution_overrideable_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
11265
11266 static auto op = create_convolution_overrideable_out_typed_handle();
11267 return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
11268}
11269
11270STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_out, name, "aten::_copy_from")
11271STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_out, overload_name, "out")
11272STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_out, schema_str, "_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)")
11273
11274// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
11275static C10_NOINLINE c10::TypedOperatorHandle<_copy_from_out::schema> create__copy_from_out_typed_handle() {
11276 return c10::Dispatcher::singleton()
11277 .findSchemaOrThrow(_copy_from_out::name, _copy_from_out::overload_name)
11278 .typed<_copy_from_out::schema>();
11279}
11280
11281// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
11282at::Tensor & _copy_from_out::call(const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
11283
11284 static auto op = create__copy_from_out_typed_handle();
11285 return op.call(self, dst, non_blocking, out);
11286}
11287
11288// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
11289at::Tensor & _copy_from_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
11290
11291 static auto op = create__copy_from_out_typed_handle();
11292 return op.redispatch(dispatchKeySet, self, dst, non_blocking, out);
11293}
11294
11295STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_out, name, "aten::cudnn_batch_norm")
11296STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_out, overload_name, "out")
11297STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_out, schema_str, "cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))")
11298
11299// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
11300static C10_NOINLINE c10::TypedOperatorHandle<cudnn_batch_norm_out::schema> create_cudnn_batch_norm_out_typed_handle() {
11301 return c10::Dispatcher::singleton()
11302 .findSchemaOrThrow(cudnn_batch_norm_out::name, cudnn_batch_norm_out::overload_name)
11303 .typed<cudnn_batch_norm_out::schema>();
11304}
11305
11306// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
11307::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
11308
11309 static auto op = create_cudnn_batch_norm_out_typed_handle();
11310 return op.call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
11311}
11312
11313// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
11314::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
11315
11316 static auto op = create_cudnn_batch_norm_out_typed_handle();
11317 return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
11318}
11319
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_transpose_out, name, "aten::_mps_convolution_transpose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_transpose_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_transpose_out, schema_str, "_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_mps_convolution_transpose_out::schema> create__mps_convolution_transpose_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mps_convolution_transpose_out::name, _mps_convolution_transpose_out::overload_name)
      .typed<_mps_convolution_transpose_out::schema>();
}

// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mps_convolution_transpose_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
    
    static auto op = create__mps_convolution_transpose_out_typed_handle();
    return op.call(self, weight, padding, output_padding, stride, dilation, groups, out);
}

// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mps_convolution_transpose_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
    
    static auto op = create__mps_convolution_transpose_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mps_convolution_transpose_backward_out, name, "aten::mps_convolution_transpose_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mps_convolution_transpose_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mps_convolution_transpose_backward_out, schema_str, "mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<mps_convolution_transpose_backward_out::schema> create_mps_convolution_transpose_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mps_convolution_transpose_backward_out::name, mps_convolution_transpose_backward_out::overload_name)
      .typed<mps_convolution_transpose_backward_out::schema>();
}

// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create_mps_convolution_transpose_backward_out_typed_handle();
    return op.call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
}

// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create_mps_convolution_transpose_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_out, name, "aten::embedding")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_out, schema_str, "embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<embedding_out::schema> create_embedding_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_out::name, embedding_out::overload_name)
      .typed<embedding_out::schema>();
}

// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_out::call(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
    
    static auto op = create_embedding_out_typed_handle();
    return op.call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}

// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
    
    static auto op = create_embedding_out_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}

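// [Illustrative sketch, not emitted by torchgen] embedding.out carries a
// SymInt `padding_idx`; c10::SymInt converts implicitly from int64_t, so a
// plain integer works for the common non-symbolic case. The helper name is
// hypothetical; the call signature is embedding_out::call above.
[[maybe_unused]] static at::Tensor & example_embedding_out(
    const at::Tensor & weight, const at::Tensor & indices, at::Tensor & out) {
  // -1 converts to c10::SymInt; the arguments mirror the schema defaults
  // (padding_idx=-1, scale_grad_by_freq=False, sparse=False).
  return embedding_out::call(weight, indices, /*padding_idx=*/-1,
                             /*scale_grad_by_freq=*/false, /*sparse=*/false, out);
}
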
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_dense_backward_out, name, "aten::_embedding_bag_dense_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_dense_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_dense_backward_out, schema_str, "_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_dense_backward_out::schema> create__embedding_bag_dense_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_dense_backward_out::name, _embedding_bag_dense_backward_out::overload_name)
      .typed<_embedding_bag_dense_backward_out::schema>();
}

// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _embedding_bag_dense_backward_out::call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
    
    static auto op = create__embedding_bag_dense_backward_out_typed_handle();
    return op.call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}

// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _embedding_bag_dense_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
    
    static auto op = create__embedding_bag_dense_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_out, name, "aten::resize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_out, schema_str, "resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<resize_out::schema> create_resize_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_out::name, resize_out::overload_name)
      .typed<resize_out::schema>();
}

// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & resize_out::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
    
    static auto op = create_resize_out_typed_handle();
    return op.call(self, size, memory_format, out);
}

// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & resize_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
    
    static auto op = create_resize_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize, name, "aten::resize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize, schema_str, "resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor")

// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<resize::schema> create_resize_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize::name, resize::overload_name)
      .typed<resize::schema>();
}

// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor resize::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_resize_typed_handle();
    return op.call(self, size, memory_format);
}

// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor resize::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_resize_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, memory_format);
}

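// [Illustrative sketch, not emitted by torchgen] Note the asymmetry above:
// resize.out takes and returns `const at::Tensor &` (resizing may replace
// storage, so the out argument is deliberately const-ref), while the
// functionalized `resize` overload returns a fresh Tensor by value and
// leaves `self` untouched. A hypothetical wrapper over the functional form:
[[maybe_unused]] static at::Tensor example_resize_functional(
    const at::Tensor & self, c10::SymIntArrayRef size) {
  // No memory-format override requested.
  return resize::call(self, size, /*memory_format=*/c10::nullopt);
}
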
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_names_out, name, "aten::full")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_names_out, overload_name, "names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_names_out, schema_str, "full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)")

// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<full_names_out::schema> create_full_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(full_names_out::name, full_names_out::overload_name)
      .typed<full_names_out::schema>();
}

// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_names_out::call(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
    
    static auto op = create_full_names_out_typed_handle();
    return op.call(size, fill_value, names, out);
}

// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
    
    static auto op = create_full_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, fill_value, names, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_like_out, name, "aten::full_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_like_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(full_like_out, schema_str, "full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<full_like_out::schema> create_full_like_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(full_like_out::name, full_like_out::overload_name)
      .typed<full_like_out::schema>();
}

// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_like_out::call(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    
    static auto op = create_full_like_out_typed_handle();
    return op.call(self, fill_value, memory_format, out);
}

// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    
    static auto op = create_full_like_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, fill_value, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_out, name, "aten::grid_sampler_2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_out, schema_str, "grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")

// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_2d_out::schema> create_grid_sampler_2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_2d_out::name, grid_sampler_2d_out::overload_name)
      .typed<grid_sampler_2d_out::schema>();
}

// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & grid_sampler_2d_out::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    
    static auto op = create_grid_sampler_2d_out_typed_handle();
    return op.call(input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & grid_sampler_2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    
    static auto op = create_grid_sampler_2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_out, name, "aten::kaiser_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_out, schema_str, "kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)")

// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_out::schema> create_kaiser_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window_out::name, kaiser_window_out::overload_name)
      .typed<kaiser_window_out::schema>();
}

// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_out::call(int64_t window_length, at::Tensor & out) {
    
    static auto op = create_kaiser_window_out_typed_handle();
    return op.call(window_length, out);
}

// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
    
    static auto op = create_kaiser_window_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_periodic_out, name, "aten::kaiser_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_periodic_out, overload_name, "periodic_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_periodic_out, schema_str, "kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)")

// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_periodic_out::schema> create_kaiser_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window_periodic_out::name, kaiser_window_periodic_out::overload_name)
      .typed<kaiser_window_periodic_out::schema>();
}

// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {
    
    static auto op = create_kaiser_window_periodic_out_typed_handle();
    return op.call(window_length, periodic, out);
}

// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
    
    static auto op = create_kaiser_window_periodic_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_beta_out, name, "aten::kaiser_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_beta_out, overload_name, "beta_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kaiser_window_beta_out, schema_str, "kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)")

// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_beta_out::schema> create_kaiser_window_beta_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window_beta_out::name, kaiser_window_beta_out::overload_name)
      .typed<kaiser_window_beta_out::schema>();
}

// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_beta_out::call(int64_t window_length, bool periodic, double beta, at::Tensor & out) {
    
    static auto op = create_kaiser_window_beta_out_typed_handle();
    return op.call(window_length, periodic, beta, out);
}

// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_beta_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out) {
    
    static auto op = create_kaiser_window_beta_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, beta, out);
}

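// [Illustrative sketch, not emitted by torchgen] The three kaiser_window
// blocks above show how overloads share one operator name
// ("aten::kaiser_window") and are told apart purely by overload_name
// ("out", "periodic_out", "beta_out") when findSchemaOrThrow resolves the
// handle. A hypothetical caller of the richest overload; note that `float`
// in the schema maps to `double` in C++, as everywhere in this file:
[[maybe_unused]] static at::Tensor & example_kaiser_window_beta_out(
    int64_t window_length, at::Tensor & out) {
  return kaiser_window_beta_out::call(window_length, /*periodic=*/true,
                                      /*beta=*/12.0, out);
}
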
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put_out, name, "aten::index_put")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_put_out, schema_str, "index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_put_out::schema> create_index_put_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_put_out::name, index_put_out::overload_name)
      .typed<index_put_out::schema>();
}

// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_put_out::call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
    
    static auto op = create_index_put_out_typed_handle();
    return op.call(self, indices, values, accumulate, out);
}

// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_put_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
    
    static auto op = create_index_put_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate, out);
}

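// [Illustrative sketch, not emitted by torchgen] The `Tensor?[] indices`
// argument of index_put.out surfaces in C++ as
// c10::List<c10::optional<at::Tensor>>: an absent entry (c10::nullopt) means
// "take every element along that dimension". Helper name is ours.
[[maybe_unused]] static at::Tensor & example_index_put_out(
    const at::Tensor & self, const at::Tensor & row_index,
    const at::Tensor & values, at::Tensor & out) {
  c10::List<c10::optional<at::Tensor>> indices;
  indices.push_back(row_index);     // index dimension 0
  indices.push_back(c10::nullopt);  // keep dimension 1 whole
  return index_put_out::call(self, indices, values, /*accumulate=*/false, out);
}
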
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_backward_out, name, "aten::matmul_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matmul_backward_out, schema_str, "matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<matmul_backward_out::schema> create_matmul_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matmul_backward_out::name, matmul_backward_out::overload_name)
      .typed<matmul_backward_out::schema>();
}

// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out::call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create_matmul_backward_out_typed_handle();
    return op.call(grad, self, other, mask, out0, out1);
}

// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create_matmul_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, other, mask, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_backward_out, name, "aten::mkldnn_max_pool2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_backward_out, schema_str, "mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool2d_backward_out::schema> create_mkldnn_max_pool2d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool2d_backward_out::name, mkldnn_max_pool2d_backward_out::overload_name)
      .typed<mkldnn_max_pool2d_backward_out::schema>();
}

// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    
    static auto op = create_mkldnn_max_pool2d_backward_out_typed_handle();
    return op.call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    
    static auto op = create_mkldnn_max_pool2d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_out, name, "aten::median")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(median_out, schema_str, "median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<median_out::schema> create_median_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_out::name, median_out::overload_name)
      .typed<median_out::schema>();
}

// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & median_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_median_out_typed_handle();
    return op.call(self, out);
}

// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & median_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_median_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_out, name, "aten::nanmedian")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmedian_out, schema_str, "nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_out::schema> create_nanmedian_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_out::name, nanmedian_out::overload_name)
      .typed<nanmedian_out::schema>();
}

// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanmedian_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_nanmedian_out_typed_handle();
    return op.call(self, out);
}

// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanmedian_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_nanmedian_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_out, name, "aten::miopen_batch_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_out, schema_str, "miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<miopen_batch_norm_out::schema> create_miopen_batch_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_batch_norm_out::name, miopen_batch_norm_out::overload_name)
      .typed<miopen_batch_norm_out::schema>();
}

// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_miopen_batch_norm_out_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
}

// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_miopen_batch_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_transpose_out, name, "aten::miopen_convolution_transpose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_transpose_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_transpose_out, schema_str, "miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)")

// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_transpose_out::schema> create_miopen_convolution_transpose_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_convolution_transpose_out::name, miopen_convolution_transpose_out::overload_name)
      .typed<miopen_convolution_transpose_out::schema>();
}

// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & miopen_convolution_transpose_out::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
    
    static auto op = create_miopen_convolution_transpose_out_typed_handle();
    return op.call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & miopen_convolution_transpose_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
    
    static auto op = create_miopen_convolution_transpose_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_rnn_backward_out, name, "aten::miopen_rnn_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_rnn_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_rnn_backward_out, schema_str, "miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()")

// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<miopen_rnn_backward_out::schema> create_miopen_rnn_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_rnn_backward_out::name, miopen_rnn_backward_out::overload_name)
      .typed<miopen_rnn_backward_out::schema>();
}

// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
void miopen_rnn_backward_out::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
    
    static auto op = create_miopen_rnn_backward_out_typed_handle();
    return op.call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}

// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
void miopen_rnn_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
    
    static auto op = create_miopen_rnn_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}

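// [Illustrative sketch, not emitted by torchgen] miopen_rnn_backward.out
// returns `()` in the schema, i.e. void in C++: every result lands in the
// mutable out0..out2 tensors and in the pre-allocated TensorList out3 (one
// gradient slot per flat weight). Since at::TensorList is a non-owning
// ArrayRef, the caller must keep the backing storage alive across the call.
// A hypothetical helper showing that ownership arrangement:
[[maybe_unused]] static at::TensorList example_grad_weight_slots(
    const ::std::vector<at::Tensor> & grad_weight_storage) {
  // The returned view must not outlive `grad_weight_storage`, which should
  // have one pre-allocated tensor per entry of `weight`.
  return at::TensorList(grad_weight_storage);
}
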
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(channel_shuffle_out, name, "aten::channel_shuffle")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(channel_shuffle_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(channel_shuffle_out, schema_str, "channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)")

// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<channel_shuffle_out::schema> create_channel_shuffle_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(channel_shuffle_out::name, channel_shuffle_out::overload_name)
      .typed<channel_shuffle_out::schema>();
}

// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & channel_shuffle_out::call(const at::Tensor & self, int64_t groups, at::Tensor & out) {
    
    static auto op = create_channel_shuffle_out_typed_handle();
    return op.call(self, groups, out);
}

// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & channel_shuffle_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups, at::Tensor & out) {
    
    static auto op = create_channel_shuffle_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, groups, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu_out, name, "aten::relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu_out, schema_str, "relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<relu_out::schema> create_relu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu_out::name, relu_out::overload_name)
      .typed<relu_out::schema>();
}

// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & relu_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_relu_out_typed_handle();
    return op.call(self, out);
}

// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_relu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_scatter_out, name, "aten::select_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_scatter_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_scatter_out, schema_str, "select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)")

// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<select_scatter_out::schema> create_select_scatter_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_scatter_out::name, select_scatter_out::overload_name)
      .typed<select_scatter_out::schema>();
}

// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_scatter_out::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
    
    static auto op = create_select_scatter_out_typed_handle();
    return op.call(self, src, dim, index, out);
}

// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
    
    static auto op = create_select_scatter_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, dim, index, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_with_sizes_out, name, "aten::unsafe_split_with_sizes")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_with_sizes_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_with_sizes_out, schema_str, "unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()")

// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_split_with_sizes_out::schema> create_unsafe_split_with_sizes_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_split_with_sizes_out::name, unsafe_split_with_sizes_out::overload_name)
      .typed<unsafe_split_with_sizes_out::schema>();
}

// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
void unsafe_split_with_sizes_out::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    
    static auto op = create_unsafe_split_with_sizes_out_typed_handle();
    return op.call(self, split_sizes, dim, out);
}

// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
void unsafe_split_with_sizes_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    
    static auto op = create_unsafe_split_with_sizes_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_sizes, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_out, name, "aten::prod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prod_out, schema_str, "prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<prod_out::schema> create_prod_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_out::name, prod_out::overload_name)
      .typed<prod_out::schema>();
}

// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_out::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_prod_out_typed_handle();
    return op.call(self, dtype, out);
}

// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_prod_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask_out, name, "aten::_nested_tensor_from_mask")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_from_mask_out, schema_str, "_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_mask_out::schema> create__nested_tensor_from_mask_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_from_mask_out::name, _nested_tensor_from_mask_out::overload_name)
      .typed<_nested_tensor_from_mask_out::schema>();
}

// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_from_mask_out::call(const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) {
    
    static auto op = create__nested_tensor_from_mask_out_typed_handle();
    return op.call(t, mask, mask_check, out);
}

// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_from_mask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) {
    
    static auto op = create__nested_tensor_from_mask_out_typed_handle();
    return op.redispatch(dispatchKeySet, t, mask, mask_check, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_size_out, name, "aten::_nested_tensor_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_size_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_size_out, schema_str, "_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_size_out::schema> create__nested_tensor_size_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_size_out::name, _nested_tensor_size_out::overload_name)
      .typed<_nested_tensor_size_out::schema>();
}

// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_size_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_tensor_size_out_typed_handle();
    return op.call(self, out);
}

// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_size_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_tensor_size_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer_copy_out, name, "aten::_nested_view_from_buffer_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer_copy_out, schema_str, "_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_buffer_copy_out::schema> create__nested_view_from_buffer_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_view_from_buffer_copy_out::name, _nested_view_from_buffer_copy_out::overload_name)
      .typed<_nested_view_from_buffer_copy_out::schema>();
}

// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_view_from_buffer_copy_out::call(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets, at::Tensor & out) {
    
    static auto op = create__nested_view_from_buffer_copy_out_typed_handle();
    return op.call(self, nested_size, nested_strides, offsets, out);
}

// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_view_from_buffer_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets, at::Tensor & out) {
    
    static auto op = create__nested_view_from_buffer_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_consecutive_out, name, "aten::unique_dim_consecutive")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_consecutive_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_consecutive_out, schema_str, "unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<unique_dim_consecutive_out::schema> create_unique_dim_consecutive_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unique_dim_consecutive_out::name, unique_dim_consecutive_out::overload_name)
      .typed<unique_dim_consecutive_out::schema>();
}

// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out::call(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_unique_dim_consecutive_out_typed_handle();
    return op.call(self, dim, return_inverse, return_counts, out0, out1, out2);
}

// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_unique_dim_consecutive_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, return_inverse, return_counts, out0, out1, out2);
}

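// [Illustrative sketch, not emitted by torchgen] Multi-output out-variants
// hand the same out references back as a tuple, which unpacks naturally
// with structured bindings. Helper name is ours; the signature comes from
// unique_dim_consecutive_out::call above.
[[maybe_unused]] static void example_unique_dim_consecutive_out(
    const at::Tensor & self, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
  auto [values, inverse, counts] = unique_dim_consecutive_out::call(
      self, /*dim=*/0, /*return_inverse=*/true, /*return_counts=*/true,
      out0, out1, out2);
  // `values`, `inverse`, and `counts` alias out0/out1/out2 respectively.
  (void)values; (void)inverse; (void)counts;
}
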
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unsafe_view_out, name, "aten::_unsafe_view")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unsafe_view_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unsafe_view_out, schema_str, "_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_view_out::schema> create__unsafe_view_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unsafe_view_out::name, _unsafe_view_out::overload_name)
      .typed<_unsafe_view_out::schema>();
}

// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _unsafe_view_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create__unsafe_view_out_typed_handle();
    return op.call(self, size, out);
}

// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _unsafe_view_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create__unsafe_view_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficientzerotensor_out, name, "aten::_efficientzerotensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficientzerotensor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficientzerotensor_out, schema_str, "_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_efficientzerotensor_out::schema> create__efficientzerotensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_efficientzerotensor_out::name, _efficientzerotensor_out::overload_name)
      .typed<_efficientzerotensor_out::schema>();
}

// aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _efficientzerotensor_out::call(at::IntArrayRef size, at::Tensor & out) {

    static auto op = create__efficientzerotensor_out_typed_handle();
    return op.call(size, out);
}

// aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _efficientzerotensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) {

    static auto op = create__efficientzerotensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson_out, name, "aten::poisson")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(poisson_out, schema_str, "poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<poisson_out::schema> create_poisson_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(poisson_out::name, poisson_out::overload_name)
      .typed<poisson_out::schema>();
}

// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & poisson_out::call(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_poisson_out_typed_handle();
    return op.call(self, generator, out);
}

// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & poisson_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_poisson_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator, out);
}

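// The Generator? argument above arrives as c10::optional<at::Generator>, and
// c10::nullopt reproduces the schema default of None. A hedged sketch of
// invoking the wrapper defined above; the example values are assumptions,
// not generated code.
#if 0
#include <ATen/ATen.h>

void poisson_out_example() {
  at::Tensor rates = at::full({3}, 4.0);   // Poisson rate per element
  at::Tensor out = at::empty_like(rates);
  // nullopt -> default generator, matching "Generator? generator=None".
  poisson_out::call(rates, c10::nullopt, out);
}
#endif
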
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Scalar_out, name, "aten::sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sub_Scalar_out, schema_str, "sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sub_Scalar_out::schema> create_sub_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sub_Scalar_out::name, sub_Scalar_out::overload_name)
      .typed<sub_Scalar_out::schema>();
}

// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sub_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_sub_Scalar_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sub_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_sub_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_size_out, name, "aten::sparse_coo_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_size_out, overload_name, "size_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_coo_tensor_size_out, schema_str, "sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sparse_coo_tensor_size_out::schema> create_sparse_coo_tensor_size_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_coo_tensor_size_out::name, sparse_coo_tensor_size_out::overload_name)
      .typed<sparse_coo_tensor_size_out::schema>();
}

// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sparse_coo_tensor_size_out::call(at::IntArrayRef size, at::Tensor & out) {

    static auto op = create_sparse_coo_tensor_size_out_typed_handle();
    return op.call(size, out);
}

// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sparse_coo_tensor_size_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) {

    static auto op = create_sparse_coo_tensor_size_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear_out, name, "aten::sparse_resize_and_clear")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear_out, schema_str, "sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_and_clear_out::schema> create_sparse_resize_and_clear_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_and_clear_out::name, sparse_resize_and_clear_out::overload_name)
      .typed<sparse_resize_and_clear_out::schema>();
}

// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & sparse_resize_and_clear_out::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {

    static auto op = create_sparse_resize_and_clear_out_typed_handle();
    return op.call(self, size, sparse_dim, dense_dim, out);
}

// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & sparse_resize_and_clear_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {

    static auto op = create_sparse_resize_and_clear_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out);
}

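// Unlike most out variants in this shard, sparse_resize_and_clear.out takes
// and returns `const at::Tensor &`: torchgen emits const references for
// resize-like out arguments, where the underlying TensorImpl is mutated even
// though the handle is const. A sketch under that reading; shapes and values
// are assumptions, not generated code.
#if 0
#include <ATen/ATen.h>

void sparse_resize_and_clear_out_example() {
  at::Tensor self = at::ones({2, 3}).to_sparse();
  at::Tensor out = at::ones({2, 3}).to_sparse();
  // `out` is resized to {4, 4} and cleared; the const reference still permits
  // the mutation because constness applies to the Tensor handle only.
  const at::Tensor & result =
      sparse_resize_and_clear_out::call(self, /*size=*/{4, 4},
                                        /*sparse_dim=*/2, /*dense_dim=*/0, out);
  (void)result;
}
#endif
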
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear, name, "aten::sparse_resize_and_clear")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_and_clear, schema_str, "sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor")

// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_and_clear::schema> create_sparse_resize_and_clear_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_and_clear::name, sparse_resize_and_clear::overload_name)
      .typed<sparse_resize_and_clear::schema>();
}

// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
at::Tensor sparse_resize_and_clear::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {

    static auto op = create_sparse_resize_and_clear_typed_handle();
    return op.call(self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
at::Tensor sparse_resize_and_clear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {

    static auto op = create_sparse_resize_and_clear_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csr_out, name, "aten::to_sparse_csr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csr_out, schema_str, "to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_csr_out::schema> create_to_sparse_csr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_csr_out::name, to_sparse_csr_out::overload_name)
      .typed<to_sparse_csr_out::schema>();
}

// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_csr_out::call(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {

    static auto op = create_to_sparse_csr_out_typed_handle();
    return op.call(self, dense_dim, out);
}

// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_csr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {

    static auto op = create_to_sparse_csr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_bsr_out, name, "aten::to_sparse_bsr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_bsr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_bsr_out, schema_str, "to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_bsr_out::schema> create_to_sparse_bsr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_bsr_out::name, to_sparse_bsr_out::overload_name)
      .typed<to_sparse_bsr_out::schema>();
}

// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_bsr_out::call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {

    static auto op = create_to_sparse_bsr_out_typed_handle();
    return op.call(self, blocksize, dense_dim, out);
}

// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_bsr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {

    static auto op = create_to_sparse_bsr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, blocksize, dense_dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv3d_weight_out, name, "aten::mkldnn_reorder_conv3d_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv3d_weight_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv3d_weight_out, schema_str, "mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_reorder_conv3d_weight_out::schema> create_mkldnn_reorder_conv3d_weight_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_reorder_conv3d_weight_out::name, mkldnn_reorder_conv3d_weight_out::overload_name)
      .typed<mkldnn_reorder_conv3d_weight_out::schema>();
}

// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_reorder_conv3d_weight_out::call(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {

    static auto op = create_mkldnn_reorder_conv3d_weight_out_typed_handle();
    return op.call(self, padding, stride, dilation, groups, out);
}

// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_reorder_conv3d_weight_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {

    static auto op = create_mkldnn_reorder_conv3d_weight_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, stride, dilation, groups, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_tensor_quantized_tensor_out, name, "aten::_make_per_tensor_quantized_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_tensor_quantized_tensor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_tensor_quantized_tensor_out, schema_str, "_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_make_per_tensor_quantized_tensor_out::schema> create__make_per_tensor_quantized_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_per_tensor_quantized_tensor_out::name, _make_per_tensor_quantized_tensor_out::overload_name)
      .typed<_make_per_tensor_quantized_tensor_out::schema>();
}

// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_per_tensor_quantized_tensor_out::call(const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {

    static auto op = create__make_per_tensor_quantized_tensor_out_typed_handle();
    return op.call(self, scale, zero_point, out);
}

// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_per_tensor_quantized_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {

    static auto op = create__make_per_tensor_quantized_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_channel_quantized_tensor_out, name, "aten::_make_per_channel_quantized_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_channel_quantized_tensor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_per_channel_quantized_tensor_out, schema_str, "_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_make_per_channel_quantized_tensor_out::schema> create__make_per_channel_quantized_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_per_channel_quantized_tensor_out::name, _make_per_channel_quantized_tensor_out::overload_name)
      .typed<_make_per_channel_quantized_tensor_out::schema>();
}

// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_per_channel_quantized_tensor_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) {

    static auto op = create__make_per_channel_quantized_tensor_out_typed_handle();
    return op.call(self, scale, zero_point, axis, out);
}

// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_per_channel_quantized_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) {

    static auto op = create__make_per_channel_quantized_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Scalar_out, name, "aten::masked_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Scalar_out, schema_str, "masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)")

// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill_Scalar_out::schema> create_masked_fill_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_fill_Scalar_out::name, masked_fill_Scalar_out::overload_name)
      .typed<masked_fill_Scalar_out::schema>();
}

// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & masked_fill_Scalar_out::call(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {

    static auto op = create_masked_fill_Scalar_out_typed_handle();
    return op.call(self, mask, value, out);
}

// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & masked_fill_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {

    static auto op = create_masked_fill_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, value, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Tensor_out, name, "aten::masked_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_fill_Tensor_out, schema_str, "masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)")

// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill_Tensor_out::schema> create_masked_fill_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_fill_Tensor_out::name, masked_fill_Tensor_out::overload_name)
      .typed<masked_fill_Tensor_out::schema>();
}

// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & masked_fill_Tensor_out::call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {

    static auto op = create_masked_fill_Tensor_out_typed_handle();
    return op.call(self, mask, value, out);
}

// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & masked_fill_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {

    static auto op = create_masked_fill_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, value, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter_out, name, "aten::masked_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_scatter_out, schema_str, "masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)")

// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_scatter_out::schema> create_masked_scatter_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_scatter_out::name, masked_scatter_out::overload_name)
      .typed<masked_scatter_out::schema>();
}

// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & masked_scatter_out::call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {

    static auto op = create_masked_scatter_out_typed_handle();
    return op.call(self, mask, source, out);
}

// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & masked_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {

    static auto op = create_masked_scatter_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, source, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_backward_out, name, "aten::_masked_softmax_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_backward_out, schema_str, "_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_masked_softmax_backward_out::schema> create__masked_softmax_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_masked_softmax_backward_out::name, _masked_softmax_backward_out::overload_name)
      .typed<_masked_softmax_backward_out::schema>();
}

// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _masked_softmax_backward_out::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) {

    static auto op = create__masked_softmax_backward_out_typed_handle();
    return op.call(grad_output, output, mask, dim, out);
}

// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _masked_softmax_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) {

    static auto op = create__masked_softmax_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, mask, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_Tensor_out, name, "aten::bitwise_or")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_Tensor_out, overload_name, "Scalar_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_or_Scalar_Tensor_out, schema_str, "bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Scalar_Tensor_out::schema> create_bitwise_or_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_or_Scalar_Tensor_out::name, bitwise_or_Scalar_Tensor_out::overload_name)
      .typed<bitwise_or_Scalar_Tensor_out::schema>();
}

// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_or_Scalar_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_or_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_indices_out, name, "aten::triu_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_indices_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_indices_out, schema_str, "triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<triu_indices_out::schema> create_triu_indices_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu_indices_out::name, triu_indices_out::overload_name)
      .typed<triu_indices_out::schema>();
}

// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & triu_indices_out::call(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {

    static auto op = create_triu_indices_out_typed_handle();
    return op.call(row, col, offset, out);
}

// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & triu_indices_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) {

    static auto op = create_triu_indices_out_typed_handle();
    return op.redispatch(dispatchKeySet, row, col, offset, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace_out, name, "aten::trace")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace_out, schema_str, "trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<trace_out::schema> create_trace_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(trace_out::name, trace_out::overload_name)
      .typed<trace_out::schema>();
}

// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & trace_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_trace_out_typed_handle();
    return op.call(self, out);
}

// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & trace_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_trace_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dist_out, name, "aten::dist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dist_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dist_out, schema_str, "dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)")

// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<dist_out::schema> create_dist_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dist_out::name, dist_out::overload_name)
      .typed<dist_out::schema>();
}

// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dist_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {

    static auto op = create_dist_out_typed_handle();
    return op.call(self, other, p, out);
}

// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {

    static auto op = create_dist_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, p, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_cts_out, name, "aten::_histogramdd_from_bin_cts")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_cts_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_cts_out, schema_str, "_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)")

// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_from_bin_cts_out::schema> create__histogramdd_from_bin_cts_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_from_bin_cts_out::name, _histogramdd_from_bin_cts_out::overload_name)
      .typed<_histogramdd_from_bin_cts_out::schema>();
}

// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _histogramdd_from_bin_cts_out::call(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {

    static auto op = create__histogramdd_from_bin_cts_out_typed_handle();
    return op.call(self, bins, range, weight, density, out);
}

// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _histogramdd_from_bin_cts_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {

    static auto op = create__histogramdd_from_bin_cts_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density, out);
}

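// In the signatures above, float[]? lowers to c10::optional<at::ArrayRef<double>>
// and Tensor? to const c10::optional<at::Tensor> &. ArrayRef is non-owning, so
// the doubles must outlive the call. A sketch with assumed inputs, guarded out
// of the build:
#if 0
#include <ATen/ATen.h>
#include <array>

void histogramdd_from_bin_cts_out_example() {
  at::Tensor self = at::rand({20, 2});                 // 20 points in 2-D
  at::Tensor out = at::empty({0});
  std::array<double, 4> range = {0.0, 1.0, 0.0, 1.0};  // (min, max) per dimension
  _histogramdd_from_bin_cts_out::call(self, /*bins=*/{4, 4},
                                      at::ArrayRef<double>(range),
                                      /*weight=*/c10::nullopt,
                                      /*density=*/false, out);
}
#endif
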
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_Tensor_out, name, "aten::remainder")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_Tensor_out, overload_name, "Scalar_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(remainder_Scalar_Tensor_out, schema_str, "remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Scalar_Tensor_out::schema> create_remainder_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Scalar_Tensor_out::name, remainder_Scalar_Tensor_out::overload_name)
      .typed<remainder_Scalar_Tensor_out::schema>();
}

// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_remainder_Scalar_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_remainder_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_Scalar_out, name, "aten::_foreach_clamp_max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_Scalar_out, schema_str, "_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_Scalar_out::schema> create__foreach_clamp_max_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max_Scalar_out::name, _foreach_clamp_max_Scalar_out::overload_name)
      .typed<_foreach_clamp_max_Scalar_out::schema>();
}

// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_clamp_max_Scalar_out_typed_handle();
    return op.call(self, scalar, out);
}

// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_clamp_max_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar, out);
}

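// The _foreach family returns void and writes through a TensorList of outputs
// rather than a single Tensor(a!). TensorList is a non-owning
// at::ArrayRef<at::Tensor>, so the backing vectors must stay alive across the
// call. A sketch with assumed shapes, guarded out of the build:
#if 0
#include <ATen/ATen.h>
#include <vector>

void foreach_clamp_max_out_example() {
  std::vector<at::Tensor> self = {at::randn({2}), at::randn({3})};
  std::vector<at::Tensor> out  = {at::empty({2}), at::empty({3})};
  // Clamps every tensor in `self` to at most 1.0, writing into `out`.
  _foreach_clamp_max_Scalar_out::call(self, /*scalar=*/1.0, out);
}
#endif
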
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_List_out, name, "aten::_foreach_clamp_max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_List_out, schema_str, "_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_List_out::schema> create__foreach_clamp_max_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max_List_out::name, _foreach_clamp_max_List_out::overload_name)
      .typed<_foreach_clamp_max_List_out::schema>();
}

// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_clamp_max_List_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_clamp_max_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_ScalarList_out, name, "aten::_foreach_clamp_max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_max_ScalarList_out, schema_str, "_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_ScalarList_out::schema> create__foreach_clamp_max_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max_ScalarList_out::name, _foreach_clamp_max_ScalarList_out::overload_name)
      .typed<_foreach_clamp_max_ScalarList_out::schema>();
}

// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_clamp_max_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_clamp_max_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs_out, name, "aten::_foreach_abs")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_abs_out, schema_str, "_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_abs_out::schema> create__foreach_abs_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_abs_out::name, _foreach_abs_out::overload_name)
      .typed<_foreach_abs_out::schema>();
}

// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_abs_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_abs_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_abs_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_abs_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1_out, name, "aten::_foreach_expm1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_expm1_out, schema_str, "_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_expm1_out::schema> create__foreach_expm1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_expm1_out::name, _foreach_expm1_out::overload_name)
      .typed<_foreach_expm1_out::schema>();
}

// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_expm1_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_expm1_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_expm1_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_expm1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10_out, name, "aten::_foreach_log10")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log10_out, schema_str, "_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log10_out::schema> create__foreach_log10_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log10_out::name, _foreach_log10_out::overload_name)
      .typed<_foreach_log10_out::schema>();
}

// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log10_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_log10_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log10_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_log10_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan_out, name, "aten::_foreach_tan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tan_out, schema_str, "_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tan_out::schema> create__foreach_tan_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_tan_out::name, _foreach_tan_out::overload_name)
      .typed<_foreach_tan_out::schema>();
}

// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_tan_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_tan_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_tan_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_tan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh_out, name, "aten::_foreach_sinh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sinh_out, schema_str, "_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sinh_out::schema> create__foreach_sinh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sinh_out::name, _foreach_sinh_out::overload_name)
      .typed<_foreach_sinh_out::schema>();
}

// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sinh_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_sinh_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sinh_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_sinh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Scalar_out, name, "aten::searchsorted")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(searchsorted_Scalar_out, schema_str, "searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)")

// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<searchsorted_Scalar_out::schema> create_searchsorted_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(searchsorted_Scalar_out::name, searchsorted_Scalar_out::overload_name)
      .typed<searchsorted_Scalar_out::schema>();
}

// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & searchsorted_Scalar_out::call(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {

    static auto op = create_searchsorted_Scalar_out_typed_handle();
    return op.call(sorted_sequence, self, out_int32, right, side, sorter, out);
}

// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & searchsorted_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {

    static auto op = create_searchsorted_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out);
}

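// str? above lowers to c10::optional<c10::string_view>, so a string literal
// converts implicitly and c10::nullopt reproduces the None default. A sketch
// with assumed inputs, guarded out of the build:
#if 0
#include <ATen/ATen.h>

void searchsorted_scalar_out_example() {
  at::Tensor seq = at::arange(10).to(at::kFloat);  // must be sorted
  at::Tensor out = at::empty({}, at::kLong);       // 0-dim result for a Scalar query
  searchsorted_Scalar_out::call(seq, /*self=*/3.5, /*out_int32=*/false,
                                /*right=*/false, /*side=*/"left",
                                /*sorter=*/c10::nullopt, out);
}
#endif
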
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool2d_out, name, "aten::_adaptive_avg_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool2d_out, schema_str, "_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool2d_out::schema> create__adaptive_avg_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool2d_out::name, _adaptive_avg_pool2d_out::overload_name)
      .typed<_adaptive_avg_pool2d_out::schema>();
}

// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {

    static auto op = create__adaptive_avg_pool2d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {

    static auto op = create__adaptive_avg_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_warn_in_autograd_out, name, "aten::_test_warn_in_autograd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_warn_in_autograd_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_warn_in_autograd_out, schema_str, "_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_test_warn_in_autograd_out::schema> create__test_warn_in_autograd_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_warn_in_autograd_out::name, _test_warn_in_autograd_out::overload_name)
      .typed<_test_warn_in_autograd_out::schema>();
}

// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_warn_in_autograd_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create__test_warn_in_autograd_out_typed_handle();
    return op.call(self, out);
}

// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_warn_in_autograd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__test_warn_in_autograd_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_copy_out, name, "aten::diagonal_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_copy_out, schema_str, "diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_copy_out::schema> create_diagonal_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_copy_out::name, diagonal_copy_out::overload_name)
      .typed<diagonal_copy_out::schema>();
}

// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_copy_out::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {

    static auto op = create_diagonal_copy_out_typed_handle();
    return op.call(self, offset, dim1, dim2, out);
}

// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {

    static auto op = create_diagonal_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, offset, dim1, dim2, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute_copy_out, name, "aten::permute_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute_copy_out, schema_str, "permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)")

// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<permute_copy_out::schema> create_permute_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(permute_copy_out::name, permute_copy_out::overload_name)
      .typed<permute_copy_out::schema>();
}

// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & permute_copy_out::call(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {

    static auto op = create_permute_copy_out_typed_handle();
    return op.call(self, dims, out);
}

// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & permute_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {

    static auto op = create_permute_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_copy_int_out, name, "aten::select_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_copy_int_out, overload_name, "int_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_copy_int_out, schema_str, "select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)")

// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<select_copy_int_out::schema> create_select_copy_int_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_copy_int_out::name, select_copy_int_out::overload_name)
      .typed<select_copy_int_out::schema>();
}

// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_copy_int_out::call(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
    static auto op = create_select_copy_int_out_typed_handle();
    return op.call(self, dim, index, out);
}

// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_copy_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
    static auto op = create_select_copy_int_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, out);
}

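// Editorial note: `index` here is a c10::SymInt, so a concrete int64_t
// converts implicitly at the call site; symbolic values only arise under
// symbolic tracing. A minimal sketch (shapes are illustrative assumptions):
//
//   at::Tensor self = at::rand({3, 5});
//   at::Tensor out  = at::empty({5});
//   // Copies row 1 of `self` into `out`.
//   at::_ops::select_copy_int_out::call(self, /*dim=*/0, /*index=*/1, out);
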
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_copy_Tensor_out, name, "aten::slice_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_copy_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_copy_Tensor_out, schema_str, "slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slice_copy_Tensor_out::schema> create_slice_copy_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_copy_Tensor_out::name, slice_copy_Tensor_out::overload_name)
      .typed<slice_copy_Tensor_out::schema>();
}

// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_copy_Tensor_out::call(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
    static auto op = create_slice_copy_Tensor_out_typed_handle();
    return op.call(self, dim, start, end, step, out);
}

// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_copy_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
    static auto op = create_slice_copy_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, end, step, out);
}

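// Editorial note: `start` and `end` are optional SymInts; passing
// c10::nullopt selects the dimension's natural bounds, so the call below
// mirrors Python's `self[::2]`. Shapes are illustrative assumptions:
//
//   at::Tensor self = at::rand({8});
//   at::Tensor out  = at::empty({4});
//   at::_ops::slice_copy_Tensor_out::call(self, /*dim=*/0, c10::nullopt,
//                                         c10::nullopt, /*step=*/2, out);
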
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_copy_out, name, "aten::t_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_copy_out, schema_str, "t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<t_copy_out::schema> create_t_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(t_copy_out::name, t_copy_out::overload_name)
      .typed<t_copy_out::schema>();
}

// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & t_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_t_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & t_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_t_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col_indices_copy_out, name, "aten::col_indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col_indices_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col_indices_copy_out, schema_str, "col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<col_indices_copy_out::schema> create_col_indices_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(col_indices_copy_out::name, col_indices_copy_out::overload_name)
      .typed<col_indices_copy_out::schema>();
}

// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & col_indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_col_indices_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & col_indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_col_indices_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias_copy_out, name, "aten::alias_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias_copy_out, schema_str, "alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<alias_copy_out::schema> create_alias_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(alias_copy_out::name, alias_copy_out::overload_name)
      .typed<alias_copy_out::schema>();
}

// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & alias_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_alias_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & alias_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_alias_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

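// Editorial note: the create_*_typed_handle factories above are just cached
// forms of a manual dispatcher lookup. Spelled out by hand for alias_copy.out,
// the equivalent one-off lookup would be (a sketch, using the schema name and
// overload registered above):
//
//   auto handle = c10::Dispatcher::singleton()
//       .findSchemaOrThrow("aten::alias_copy", "out")
//       .typed<at::Tensor & (const at::Tensor &, at::Tensor &)>();
//   at::Tensor self = at::rand({2, 2});
//   at::Tensor out  = at::empty({2, 2});
//   handle.call(self, out);
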
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_scaled_dot_attention_out, name, "aten::_triton_scaled_dot_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_scaled_dot_attention_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_scaled_dot_attention_out, schema_str, "_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_triton_scaled_dot_attention_out::schema> create__triton_scaled_dot_attention_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_triton_scaled_dot_attention_out::name, _triton_scaled_dot_attention_out::overload_name)
      .typed<_triton_scaled_dot_attention_out::schema>();
}

// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _triton_scaled_dot_attention_out::call(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
    static auto op = create__triton_scaled_dot_attention_out_typed_handle();
    return op.call(q, k, v, dropout_p, out);
}

// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _triton_scaled_dot_attention_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
    static auto op = create__triton_scaled_dot_attention_out_typed_handle();
    return op.redispatch(dispatchKeySet, q, k, v, dropout_p, out);
}

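// Editorial note: schema `float` lowers to C++ `double`, which is why
// `dropout_p` is a double in the signatures above. A hedged sketch of a call
// (assumes a CUDA build with Triton available; the shapes are illustrative
// assumptions, not requirements documented in this file):
//
//   at::Tensor q = at::rand({1, 8, 16}, at::kCUDA);
//   at::Tensor k = at::rand({1, 8, 16}, at::kCUDA);
//   at::Tensor v = at::rand({1, 8, 16}, at::kCUDA);
//   at::Tensor out = at::empty_like(q);
//   at::_ops::_triton_scaled_dot_attention_out::call(q, k, v, /*dropout_p=*/0.0, out);
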
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foobar_out, name, "aten::_foobar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foobar_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foobar_out, schema_str, "_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)")

// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_foobar_out::schema> create__foobar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foobar_out::name, _foobar_out::overload_name)
      .typed<_foobar_out::schema>();
}

// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _foobar_out::call(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
    static auto op = create__foobar_out_typed_handle();
    return op.call(self, arg1, arg2, arg3, out);
}

// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _foobar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
    static auto op = create__foobar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, arg1, arg2, arg3, out);
}

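// Editorial note: _foobar is an internal dummy operator used to exercise the
// codegen and dispatcher (here: boolean defaults plus a keyword-only out
// argument); it is not intended as a user-facing API.
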
}} // namespace at::_ops