#include <ATen/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h>

// @generated by torchgen/gen.py from Operators.cpp
// NOTE See [Sharded File] comment in VariableType

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
#include <ATen/ops/_cast_Byte.h>
#include <ATen/ops/_cast_Char.h>
#include <ATen/ops/is_leaf.h>
#include <ATen/ops/requires_grad.h>
#include <ATen/ops/retains_grad.h>
#include <ATen/ops/_unpack_dual.h>
#include <ATen/ops/_has_same_storage_numel.h>
#include <ATen/ops/align_to.h>
#include <ATen/ops/align_to.h>
#include <ATen/ops/_use_cudnn_ctc_loss.h>
#include <ATen/ops/_use_cudnn_ctc_loss.h>
#include <ATen/ops/_cudnn_ctc_loss.h>
#include <ATen/ops/_cudnn_ctc_loss.h>
#include <ATen/ops/_cudnn_rnn.h>
#include <ATen/ops/_debug_has_internal_overlap.h>
#include <ATen/ops/_fused_dropout.h>
#include <ATen/ops/_sobol_engine_initialize_state.h>
#include <ATen/ops/_shape_as_tensor.h>
#include <ATen/ops/dropout.h>
#include <ATen/ops/dropout.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/real.h>
#include <ATen/ops/_conj.h>
#include <ATen/ops/_conj_physical.h>
#include <ATen/ops/_neg_view.h>
#include <ATen/ops/avg_pool1d.h>
#include <ATen/ops/adaptive_avg_pool1d.h>
#include <ATen/ops/_is_all_true.h>
#include <ATen/ops/_test_check_tensor.h>
#include <ATen/ops/allclose.h>
#include <ATen/ops/argmax.h>
#include <ATen/ops/argmax.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/as_strided.h>
#include <ATen/ops/as_strided.h>
#include <ATen/ops/atleast_3d.h>
#include <ATen/ops/atleast_3d.h>
#include <ATen/ops/_batch_norm_impl_index.h>
#include <ATen/ops/_batch_norm_impl_index_backward.h>
#include <ATen/ops/logical_or.h>
#include <ATen/ops/logical_or.h>
#include <ATen/ops/logical_or.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/broadcast_tensors.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/convolution.h>
#include <ATen/ops/convolution_backward_overrideable.h>
#include <ATen/ops/_convolution.h>
#include <ATen/ops/_convolution.h>
#include <ATen/ops/conv_transpose1d.h>
#include <ATen/ops/cos.h>
#include <ATen/ops/cos.h>
#include <ATen/ops/cos.h>
#include <ATen/ops/cudnn_affine_grid_generator.h>
#include <ATen/ops/cudnn_batch_norm_backward.h>
#include <ATen/ops/cudnn_convolution_transpose.h>
#include <ATen/ops/cudnn_grid_sampler_backward.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/diagflat.h>
#include <ATen/ops/linalg_diagonal.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/vdot.h>
#include <ATen/ops/vdot.h>
#include <ATen/ops/embedding_backward.h>
#include <ATen/ops/embedding_dense_backward.h>
#include <ATen/ops/_embedding_bag.h>
#include <ATen/ops/_embedding_bag_sparse_backward.h>
#include <ATen/ops/new_empty.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/expand_as.h>
#include <ATen/ops/unflatten.h>
#include <ATen/ops/unflatten.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/lcm.h>
#include <ATen/ops/lcm.h>
#include <ATen/ops/lcm.h>
#include <ATen/ops/grid_sampler_2d_backward.h>
#include <ATen/ops/group_norm.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/_index_put_impl.h>
#include <ATen/ops/is_distributed.h>
#include <ATen/ops/is_inference.h>
#include <ATen/ops/kron.h>
#include <ATen/ops/kron.h>
#include <ATen/ops/linear.h>
#include <ATen/ops/linear.h>
#include <ATen/ops/mkldnn_linear.h>
#include <ATen/ops/mkldnn_linear_backward_weights.h>
#include <ATen/ops/fbgemm_linear_quantize_weight.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/log10.h>
#include <ATen/ops/log10.h>
#include <ATen/ops/log10.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/logaddexp2.h>
#include <ATen/ops/logaddexp2.h>
#include <ATen/ops/_log_softmax.h>
#include <ATen/ops/_log_softmax.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max_pool1d_with_indices.h>
#include <ATen/ops/mkldnn_max_pool3d_backward.h>
#include <ATen/ops/quantized_max_pool1d.h>
#include <ATen/ops/mkldnn_convolution.h>
#include <ATen/ops/miopen_batch_norm_backward.h>
#include <ATen/ops/miopen_convolution_relu.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mvlgamma.h>
#include <ATen/ops/mvlgamma.h>
#include <ATen/ops/mvlgamma.h>
#include <ATen/ops/narrow.h>
#include <ATen/ops/narrow.h>
#include <ATen/ops/batch_norm_backward_elemt.h>
#include <ATen/ops/pdist.h>
#include <ATen/ops/moveaxis.h>
#include <ATen/ops/moveaxis.h>
#include <ATen/ops/pixel_unshuffle.h>
#include <ATen/ops/is_pinned.h>
#include <ATen/ops/pin_memory.h>
#include <ATen/ops/_pin_memory.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/range.h>
#include <ATen/ops/range.h>
#include <ATen/ops/range.h>
#include <ATen/ops/range.h>
#include <ATen/ops/ravel.h>
#include <ATen/ops/reciprocal.h>
#include <ATen/ops/reciprocal.h>
#include <ATen/ops/reciprocal.h>
#include <ATen/ops/neg.h>
#include <ATen/ops/neg.h>
#include <ATen/ops/neg.h>
#include <ATen/ops/reshape_as.h>
#include <ATen/ops/rrelu.h>
#include <ATen/ops/rrelu.h>
#include <ATen/ops/relu6.h>
#include <ATen/ops/relu6.h>
#include <ATen/ops/prelu.h>
#include <ATen/ops/_prelu_kernel_backward.h>
#include <ATen/ops/gelu_backward.h>
#include <ATen/ops/gelu_backward.h>
#include <ATen/ops/selu.h>
#include <ATen/ops/selu.h>
#include <ATen/ops/silu_backward.h>
#include <ATen/ops/silu_backward.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/diagonal_scatter.h>
#include <ATen/ops/as_strided_scatter.h>
#include <ATen/ops/split.h>
#include <ATen/ops/split.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/sspaddmm.h>
#include <ATen/ops/sspaddmm.h>
#include <ATen/ops/stride.h>
#include <ATen/ops/stride.h>
#include <ATen/ops/threshold_backward.h>
#include <ATen/ops/threshold_backward.h>
#include <ATen/ops/one_hot.h>
#include <ATen/ops/_transform_bias_rescale_qkv.h>
#include <ATen/ops/_unique.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/_weight_norm.h>
#include <ATen/ops/_weight_norm_interface.h>
#include <ATen/ops/_weight_norm_differentiable_backward.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/_standard_gamma.h>
#include <ATen/ops/_sample_dirichlet.h>
#include <ATen/ops/binomial.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_addmm.h>
#include <ATen/ops/_sparse_mm_reduce_impl_backward.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/sparse_csc_tensor.h>
#include <ATen/ops/sparse_bsc_tensor.h>
#include <ATen/ops/sparse_csc_tensor.h>
#include <ATen/ops/sparse_bsc_tensor.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
#include <ATen/ops/_validate_sparse_csr_tensor_args.h>
#include <ATen/ops/_validate_sparse_bsr_tensor_args.h>
#include <ATen/ops/_validate_sparse_bsc_tensor_args.h>
#include <ATen/ops/sparse_resize.h>
#include <ATen/ops/sparse_mask.h>
#include <ATen/ops/_to_cpu.h>
#include <ATen/ops/values.h>
#include <ATen/ops/row_indices.h>
#include <ATen/ops/copy_sparse_to_sparse.h>
#include <ATen/ops/unbind.h>
#include <ATen/ops/unbind.h>
#include <ATen/ops/to_sparse.h>
#include <ATen/ops/to_sparse.h>
#include <ATen/ops/to_mkldnn.h>
#include <ATen/ops/to_mkldnn_backward.h>
#include <ATen/ops/int_repr.h>
#include <ATen/ops/qscheme.h>
#include <ATen/ops/fake_quantize_per_channel_affine.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
#include <ATen/ops/_to_copy.h>
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward.h>
#include <ATen/ops/_thnn_differentiable_gru_cell_backward.h>
#include <ATen/ops/rnn_tanh_cell.h>
#include <ATen/ops/quantized_gru_cell.h>
#include <ATen/ops/_pack_padded_sequence_backward.h>
#include <ATen/ops/lift.h>
#include <ATen/ops/lift_fresh.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/__or__.h>
#include <ATen/ops/__or__.h>
#include <ATen/ops/__or__.h>
#include <ATen/ops/__or__.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/__lshift__.h>
#include <ATen/ops/__lshift__.h>
#include <ATen/ops/__lshift__.h>
#include <ATen/ops/__lshift__.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/__rshift__.h>
#include <ATen/ops/__rshift__.h>
#include <ATen/ops/__rshift__.h>
#include <ATen/ops/__rshift__.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/exponential.h>
#include <ATen/ops/geometric.h>
#include <ATen/ops/trace_backward.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/take_along_dim.h>
#include <ATen/ops/take_along_dim.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/masked_select_backward.h>
#include <ATen/ops/nonzero.h>
#include <ATen/ops/nonzero.h>
#include <ATen/ops/nonzero_numpy.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/swapdims.h>
#include <ATen/ops/swapdims.h>
#include <ATen/ops/cholesky.h>
#include <ATen/ops/cholesky.h>
#include <ATen/ops/lu_solve.h>
#include <ATen/ops/lu_solve.h>
#include <ATen/ops/lu_unpack.h>
#include <ATen/ops/lu_unpack.h>
#include <ATen/ops/multinomial.h>
#include <ATen/ops/multinomial.h>
#include <ATen/ops/lgamma.h>
#include <ATen/ops/lgamma.h>
#include <ATen/ops/lgamma.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/igamma.h>
#include <ATen/ops/igamma.h>
#include <ATen/ops/igamma.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_cosh.h>
#include <ATen/ops/_foreach_cosh.h>
#include <ATen/ops/_foreach_erfc.h>
#include <ATen/ops/_foreach_erfc.h>
#include <ATen/ops/_foreach_round.h>
#include <ATen/ops/_foreach_round.h>
#include <ATen/ops/_foreach_lgamma.h>
#include <ATen/ops/_foreach_lgamma.h>
#include <ATen/ops/_foreach_frac.h>
#include <ATen/ops/_foreach_frac.h>
#include <ATen/ops/_foreach_trunc.h>
#include <ATen/ops/_foreach_trunc.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/mse_loss_backward.h>
#include <ATen/ops/mse_loss_backward.h>
#include <ATen/ops/multi_margin_loss_backward.h>
#include <ATen/ops/multi_margin_loss_backward.h>
#include <ATen/ops/multilabel_margin_loss_backward.h>
#include <ATen/ops/multilabel_margin_loss_backward.h>
#include <ATen/ops/elu_backward.h>
#include <ATen/ops/elu_backward.h>
#include <ATen/ops/hardsigmoid_backward.h>
#include <ATen/ops/hardsigmoid_backward.h>
#include <ATen/ops/rrelu_with_noise_backward.h>
#include <ATen/ops/softplus_backward.h>
#include <ATen/ops/softplus_backward.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/fractional_max_pool3d_backward.h>
#include <ATen/ops/fractional_max_pool3d_backward.h>
#include <ATen/ops/max_pool2d_with_indices.h>
#include <ATen/ops/max_pool2d_with_indices.h>
#include <ATen/ops/reflection_pad1d.h>
#include <ATen/ops/reflection_pad1d.h>
#include <ATen/ops/_pad_enum.h>
#include <ATen/ops/upsample_trilinear3d.h>
#include <ATen/ops/_upsample_bicubic2d_aa.h>
#include <ATen/ops/upsample_nearest3d.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward.h>
#include <ATen/ops/_upsample_bicubic2d_aa.h>
#include <ATen/ops/_upsample_bicubic2d_aa.h>
#include <ATen/ops/upsample_trilinear3d.h>
#include <ATen/ops/upsample_trilinear3d.h>
#include <ATen/ops/upsample_nearest3d.h>
#include <ATen/ops/upsample_nearest3d.h>
#include <ATen/ops/sigmoid_backward.h>
#include <ATen/ops/sigmoid_backward.h>
#include <ATen/ops/tanh_backward.h>
#include <ATen/ops/tanh_backward.h>
#include <ATen/ops/thnn_conv2d.h>
#include <ATen/ops/thnn_conv2d.h>
#include <ATen/ops/_slow_conv2d_forward.h>
#include <ATen/ops/_slow_conv2d_forward.h>
#include <ATen/ops/column_stack.h>
#include <ATen/ops/column_stack.h>
#include <ATen/ops/special_entr.h>
#include <ATen/ops/special_entr.h>
#include <ATen/ops/special_ndtri.h>
#include <ATen/ops/special_ndtri.h>
#include <ATen/ops/special_erfc.h>
#include <ATen/ops/special_erfc.h>
#include <ATen/ops/special_i1e.h>
#include <ATen/ops/special_i1e.h>
#include <ATen/ops/special_logsumexp.h>
#include <ATen/ops/special_logsumexp.h>
#include <ATen/ops/special_gammainc.h>
#include <ATen/ops/special_gammainc.h>
#include <ATen/ops/fft_rfft2.h>
#include <ATen/ops/fft_rfft2.h>
#include <ATen/ops/fft_hfftn.h>
#include <ATen/ops/fft_hfftn.h>
#include <ATen/ops/linalg_lu.h>
#include <ATen/ops/linalg_lu.h>
#include <ATen/ops/linalg_ldl_factor_ex.h>
#include <ATen/ops/linalg_ldl_factor_ex.h>
#include <ATen/ops/linalg_ldl_solve.h>
#include <ATen/ops/linalg_ldl_solve.h>
#include <ATen/ops/linalg_lstsq.h>
#include <ATen/ops/linalg_lstsq.h>
#include <ATen/ops/linalg_vecdot.h>
#include <ATen/ops/linalg_vecdot.h>
#include <ATen/ops/linalg_matrix_exp.h>
#include <ATen/ops/_linalg_eigh.h>
#include <ATen/ops/_linalg_eigh.h>
#include <ATen/ops/linalg_norm.h>
#include <ATen/ops/linalg_norm.h>
#include <ATen/ops/linalg_norm.h>
#include <ATen/ops/linalg_norm.h>
#include <ATen/ops/linalg_svdvals.h>
#include <ATen/ops/linalg_svdvals.h>
#include <ATen/ops/linalg_matrix_power.h>
#include <ATen/ops/linalg_matrix_power.h>
#include <ATen/ops/_test_serialization_subcmul.h>
#include <ATen/ops/_test_optional_intlist.h>
#include <ATen/ops/_test_ambiguous_defaults.h>
#include <ATen/ops/_test_ambiguous_defaults.h>
#include <ATen/ops/_test_autograd_multiple_dispatch.h>
#include <ATen/ops/_test_autograd_multiple_dispatch.h>
#include <ATen/ops/segment_reduce.h>
#include <ATen/ops/_segment_reduce_backward.h>
#include <ATen/ops/_make_dual_copy.h>
#include <ATen/ops/view_as_complex_copy.h>
#include <ATen/ops/_neg_view_copy.h>
#include <ATen/ops/expand_copy.h>
#include <ATen/ops/unsqueeze_copy.h>
#include <ATen/ops/crow_indices_copy.h>
#include <ATen/ops/to_padded_tensor.h>
#include <ATen/ops/_nested_tensor_softmax_with_shape.h>
#include <ATen/ops/_flash_attention_forward.h>
#include <ATen/ops/special_bessel_j0.h>
#include <ATen/ops/special_bessel_j0.h>
#include <ATen/ops/special_bessel_y0.h>
#include <ATen/ops/special_bessel_y0.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_modified_bessel_i1.h>
#include <ATen/ops/special_modified_bessel_i1.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_spherical_bessel_j0.h>
#include <ATen/ops/special_spherical_bessel_j0.h>
#include <ATen/ops/_cudnn_ctc_loss.h>
#include <ATen/ops/_cudnn_rnn.h>
#include <ATen/ops/_fused_dropout.h>
#include <ATen/ops/_conj_physical.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/convolution.h>
#include <ATen/ops/convolution_backward_overrideable.h>
#include <ATen/ops/_convolution.h>
#include <ATen/ops/cudnn_affine_grid_generator.h>
#include <ATen/ops/cudnn_batch_norm_backward.h>
#include <ATen/ops/cudnn_convolution_transpose.h>
#include <ATen/ops/cudnn_grid_sampler_backward.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/embedding_dense_backward.h>
#include <ATen/ops/_embedding_bag.h>
#include <ATen/ops/new_empty.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/grid_sampler_2d_backward.h>
#include <ATen/ops/_index_put_impl.h>
#include <ATen/ops/_index_put_impl.h>
#include <ATen/ops/mkldnn_linear.h>
#include <ATen/ops/mkldnn_linear_backward_weights.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/mkldnn_max_pool3d_backward.h>
#include <ATen/ops/quantized_max_pool1d.h>
#include <ATen/ops/mkldnn_convolution.h>
#include <ATen/ops/miopen_batch_norm_backward.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/batch_norm_backward_elemt.h>
#include <ATen/ops/pixel_unshuffle.h>
#include <ATen/ops/_pin_memory.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/diagonal_scatter.h>
#include <ATen/ops/as_strided_scatter.h>
#include <ATen/ops/_transform_bias_rescale_qkv.h>
#include <ATen/ops/_unique.h>
#include <ATen/ops/_weight_norm_interface.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/_standard_gamma.h>
#include <ATen/ops/_sample_dirichlet.h>
#include <ATen/ops/binomial.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_addmm.h>
#include <ATen/ops/sparse_resize.h>
#include <ATen/ops/sparse_resize.h>
#include <ATen/ops/sparse_mask.h>
#include <ATen/ops/copy_sparse_to_sparse.h>
#include <ATen/ops/copy_sparse_to_sparse.h>
#include <ATen/ops/to_sparse.h>
#include <ATen/ops/to_sparse.h>
#include <ATen/ops/to_mkldnn.h>
#include <ATen/ops/int_repr.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
#include <ATen/ops/_to_copy.h>
#include <ATen/ops/lift.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/__lshift__.h>
#include <ATen/ops/__lshift__.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/__rshift__.h>
#include <ATen/ops/__rshift__.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/exponential.h>
#include <ATen/ops/exponential.h>
#include <ATen/ops/geometric.h>
#include <ATen/ops/geometric.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_cosh.h>
#include <ATen/ops/_foreach_erfc.h>
#include <ATen/ops/_foreach_round.h>
#include <ATen/ops/_foreach_lgamma.h>
#include <ATen/ops/_foreach_frac.h>
#include <ATen/ops/_foreach_trunc.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/rrelu_with_noise_backward.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/linalg_matrix_exp.h>
#include <ATen/ops/_test_optional_intlist.h>
#include <ATen/ops/_test_autograd_multiple_dispatch.h>
#include <ATen/ops/segment_reduce.h>
#include <ATen/ops/_segment_reduce_backward.h>
#include <ATen/ops/_make_dual_copy.h>
#include <ATen/ops/view_as_complex_copy.h>
#include <ATen/ops/_neg_view_copy.h>
#include <ATen/ops/expand_copy.h>
#include <ATen/ops/unsqueeze_copy.h>
#include <ATen/ops/crow_indices_copy.h>
#include <ATen/ops/to_padded_tensor.h>
#endif



namespace at { namespace _ops {


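// NOTE [Typed operator handles]
// Every operator in this shard follows the same pattern: a C10_NOINLINE
// factory resolves the operator's schema through the dispatcher, a
// function-local static caches the resulting TypedOperatorHandle, and
// call()/redispatch() forward their arguments through that handle. A minimal
// usage sketch (hypothetical client code, not part of this generated file;
// `t` stands for any input tensor):
//
//   #include <ATen/core/dispatch/Dispatcher.h>
//   #include <ATen/ops/_cast_Byte.h>
//
//   at::Tensor to_byte(const at::Tensor & t) {
//     // Dispatches from the top of the key set, just like the public
//     // at::_cast_Byte(t) wrapper does.
//     return at::_ops::_cast_Byte::call(t, /*non_blocking=*/false);
//   }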
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Byte, name, "aten::_cast_Byte")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Byte, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Byte, schema_str, "_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor")

// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Byte::schema> create__cast_Byte_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cast_Byte::name, _cast_Byte::overload_name)
      .typed<_cast_Byte::schema>();
}

// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Byte::call(const at::Tensor & self, bool non_blocking) {

    static auto op = create__cast_Byte_typed_handle();
    return op.call(self, non_blocking);
}

// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Byte::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {

    static auto op = create__cast_Byte_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking);
}
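
// A sketch of when redispatch() is used instead of call(): a kernel that is
// already running under some dispatch key (an autograd kernel, say) re-enters
// the dispatcher *below* its own key rather than from the top. The key-set
// arithmetic below is illustrative only, not taken from this file:
//
//   at::Tensor byte_below_autograd(c10::DispatchKeySet ks, const at::Tensor & t) {
//     // Mask out the autograd keys so dispatch lands on the backend kernel.
//     return at::_ops::_cast_Byte::redispatch(
//         ks & c10::after_autograd_keyset, t, /*non_blocking=*/false);
//   }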

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Char, name, "aten::_cast_Char")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Char, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Char, schema_str, "_cast_Char(Tensor self, bool non_blocking=False) -> Tensor")

// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Char::schema> create__cast_Char_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cast_Char::name, _cast_Char::overload_name)
      .typed<_cast_Char::schema>();
}

// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Char::call(const at::Tensor & self, bool non_blocking) {

    static auto op = create__cast_Char_typed_handle();
    return op.call(self, non_blocking);
}

// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Char::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {

    static auto op = create__cast_Char_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_leaf, name, "aten::is_leaf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_leaf, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_leaf, schema_str, "is_leaf(Tensor self) -> bool")

// aten::is_leaf(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_leaf::schema> create_is_leaf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_leaf::name, is_leaf::overload_name)
      .typed<is_leaf::schema>();
}

// aten::is_leaf(Tensor self) -> bool
bool is_leaf::call(const at::Tensor & self) {

    static auto op = create_is_leaf_typed_handle();
    return op.call(self);
}

// aten::is_leaf(Tensor self) -> bool
bool is_leaf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_is_leaf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(requires_grad_, name, "aten::requires_grad_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(requires_grad_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(requires_grad_, schema_str, "requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)")

// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<requires_grad_::schema> create_requires_grad__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(requires_grad_::name, requires_grad_::overload_name)
      .typed<requires_grad_::schema>();
}

// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
at::Tensor & requires_grad_::call(at::Tensor & self, bool requires_grad) {

    static auto op = create_requires_grad__typed_handle();
    return op.call(self, requires_grad);
}

// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
at::Tensor & requires_grad_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool requires_grad) {

    static auto op = create_requires_grad__typed_handle();
    return op.redispatch(dispatchKeySet, self, requires_grad);
}
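
// Schemas annotated Tensor(a!), like requires_grad_ above, mutate self and
// return it by reference, which is why call() takes and returns at::Tensor &.
// A small sketch of driving such an op through this entry point (the tensor
// and literal here are placeholders):
//
//   at::Tensor t = at::zeros({2, 2});
//   // Equivalent to t.requires_grad_(true); `same` aliases `t`.
//   at::Tensor & same = at::_ops::requires_grad_::call(t, /*requires_grad=*/true);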

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(retains_grad, name, "aten::retains_grad")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(retains_grad, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(retains_grad, schema_str, "retains_grad(Tensor self) -> bool")

// aten::retains_grad(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<retains_grad::schema> create_retains_grad_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(retains_grad::name, retains_grad::overload_name)
      .typed<retains_grad::schema>();
}

// aten::retains_grad(Tensor self) -> bool
bool retains_grad::call(const at::Tensor & self) {

    static auto op = create_retains_grad_typed_handle();
    return op.call(self);
}

// aten::retains_grad(Tensor self) -> bool
bool retains_grad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_retains_grad_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unpack_dual, name, "aten::_unpack_dual")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unpack_dual, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unpack_dual, schema_str, "_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)")

// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
static C10_NOINLINE c10::TypedOperatorHandle<_unpack_dual::schema> create__unpack_dual_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unpack_dual::name, _unpack_dual::overload_name)
      .typed<_unpack_dual::schema>();
}

// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
::std::tuple<at::Tensor,at::Tensor> _unpack_dual::call(const at::Tensor & dual, int64_t level) {

    static auto op = create__unpack_dual_typed_handle();
    return op.call(dual, level);
}

// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
::std::tuple<at::Tensor,at::Tensor> _unpack_dual::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dual, int64_t level) {

    static auto op = create__unpack_dual_typed_handle();
    return op.redispatch(dispatchKeySet, dual, level);
}
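
// Multi-output schemas such as _unpack_dual come back as ::std::tuple, so
// callers usually destructure the result. A minimal sketch (`dual` is a
// placeholder for a tensor built with at::_make_dual):
//
//   auto [primal, tangent] = at::_ops::_unpack_dual::call(dual, /*level=*/0);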

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_has_same_storage_numel, name, "aten::_has_same_storage_numel")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_has_same_storage_numel, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_has_same_storage_numel, schema_str, "_has_same_storage_numel(Tensor self, Tensor other) -> bool")

// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_has_same_storage_numel::schema> create__has_same_storage_numel_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_has_same_storage_numel::name, _has_same_storage_numel::overload_name)
      .typed<_has_same_storage_numel::schema>();
}

// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
bool _has_same_storage_numel::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create__has_same_storage_numel_typed_handle();
    return op.call(self, other);
}

// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
bool _has_same_storage_numel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create__has_same_storage_numel_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_to, name, "aten::align_to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_to, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_to, schema_str, "align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)")

// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<align_to::schema> create_align_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(align_to::name, align_to::overload_name)
      .typed<align_to::schema>();
}

// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
at::Tensor align_to::call(const at::Tensor & self, at::DimnameList names) {

    static auto op = create_align_to_typed_handle();
    return op.call(self, names);
}

// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
at::Tensor align_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) {

    static auto op = create_align_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, names);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_to_ellipsis_idx, name, "aten::align_to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_to_ellipsis_idx, overload_name, "ellipsis_idx")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_to_ellipsis_idx, schema_str, "align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)")

// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<align_to_ellipsis_idx::schema> create_align_to_ellipsis_idx_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(align_to_ellipsis_idx::name, align_to_ellipsis_idx::overload_name)
      .typed<align_to_ellipsis_idx::schema>();
}

// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
at::Tensor align_to_ellipsis_idx::call(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {

    static auto op = create_align_to_ellipsis_idx_typed_handle();
    return op.call(self, order, ellipsis_idx);
}

// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
at::Tensor align_to_ellipsis_idx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {

    static auto op = create_align_to_ellipsis_idx_typed_handle();
    return op.redispatch(dispatchKeySet, self, order, ellipsis_idx);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_ctc_loss, name, "aten::_use_cudnn_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_ctc_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_ctc_loss, schema_str, "_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool")

// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_use_cudnn_ctc_loss::schema> create__use_cudnn_ctc_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_use_cudnn_ctc_loss::name, _use_cudnn_ctc_loss::overload_name)
      .typed<_use_cudnn_ctc_loss::schema>();
}

// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
bool _use_cudnn_ctc_loss::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank) {

    static auto op = create__use_cudnn_ctc_loss_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank);
}

// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
bool _use_cudnn_ctc_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank) {

    static auto op = create__use_cudnn_ctc_loss_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_ctc_loss_Tensor, name, "aten::_use_cudnn_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_ctc_loss_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_ctc_loss_Tensor, schema_str, "_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool")

// aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_use_cudnn_ctc_loss_Tensor::schema> create__use_cudnn_ctc_loss_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_use_cudnn_ctc_loss_Tensor::name, _use_cudnn_ctc_loss_Tensor::overload_name)
      .typed<_use_cudnn_ctc_loss_Tensor::schema>();
}

// aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
bool _use_cudnn_ctc_loss_Tensor::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank) {

    static auto op = create__use_cudnn_ctc_loss_Tensor_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank);
}

// aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
bool _use_cudnn_ctc_loss_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank) {

    static auto op = create__use_cudnn_ctc_loss_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss, name, "aten::_cudnn_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss, schema_str, "_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)")

// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_ctc_loss::schema> create__cudnn_ctc_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_ctc_loss::name, _cudnn_ctc_loss::overload_name)
      .typed<_cudnn_ctc_loss::schema>();
}

// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {

    static auto op = create__cudnn_ctc_loss_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {

    static auto op = create__cudnn_ctc_loss_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss_Tensor, name, "aten::_cudnn_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss_Tensor, schema_str, "_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)")

// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_ctc_loss_Tensor::schema> create__cudnn_ctc_loss_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_ctc_loss_Tensor::name, _cudnn_ctc_loss_Tensor::overload_name)
      .typed<_cudnn_ctc_loss_Tensor::schema>();
}

// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {

    static auto op = create__cudnn_ctc_loss_Tensor_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {

    static auto op = create__cudnn_ctc_loss_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn, name, "aten::_cudnn_rnn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn, schema_str, "_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")

// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn::schema> create__cudnn_rnn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn::name, _cudnn_rnn::overload_name)
      .typed<_cudnn_rnn::schema>();
}

// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {

    static auto op = create__cudnn_rnn_typed_handle();
    return op.call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {

    static auto op = create__cudnn_rnn_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_debug_has_internal_overlap, name, "aten::_debug_has_internal_overlap")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_debug_has_internal_overlap, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_debug_has_internal_overlap, schema_str, "_debug_has_internal_overlap(Tensor self) -> int")

// aten::_debug_has_internal_overlap(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_debug_has_internal_overlap::schema> create__debug_has_internal_overlap_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_debug_has_internal_overlap::name, _debug_has_internal_overlap::overload_name)
      .typed<_debug_has_internal_overlap::schema>();
}

// aten::_debug_has_internal_overlap(Tensor self) -> int
int64_t _debug_has_internal_overlap::call(const at::Tensor & self) {

    static auto op = create__debug_has_internal_overlap_typed_handle();
    return op.call(self);
}

// aten::_debug_has_internal_overlap(Tensor self) -> int
int64_t _debug_has_internal_overlap::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__debug_has_internal_overlap_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_dropout, name, "aten::_fused_dropout")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_dropout, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_dropout, schema_str, "_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)")

// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_dropout::schema> create__fused_dropout_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_dropout::name, _fused_dropout::overload_name)
      .typed<_fused_dropout::schema>();
}

// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _fused_dropout::call(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {

    static auto op = create__fused_dropout_typed_handle();
    return op.call(self, p, generator);
}

// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _fused_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator) {

    static auto op = create__fused_dropout_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_initialize_state_, name, "aten::_sobol_engine_initialize_state_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_initialize_state_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_initialize_state_, schema_str, "_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)")

// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sobol_engine_initialize_state_::schema> create__sobol_engine_initialize_state__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sobol_engine_initialize_state_::name, _sobol_engine_initialize_state_::overload_name)
      .typed<_sobol_engine_initialize_state_::schema>();
}

// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
at::Tensor & _sobol_engine_initialize_state_::call(at::Tensor & self, int64_t dimension) {

    static auto op = create__sobol_engine_initialize_state__typed_handle();
    return op.call(self, dimension);
}

// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
at::Tensor & _sobol_engine_initialize_state_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dimension) {

    static auto op = create__sobol_engine_initialize_state__typed_handle();
    return op.redispatch(dispatchKeySet, self, dimension);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_shape_as_tensor, name, "aten::_shape_as_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_shape_as_tensor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_shape_as_tensor, schema_str, "_shape_as_tensor(Tensor self) -> Tensor")

// aten::_shape_as_tensor(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_shape_as_tensor::schema> create__shape_as_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_shape_as_tensor::name, _shape_as_tensor::overload_name)
      .typed<_shape_as_tensor::schema>();
}

// aten::_shape_as_tensor(Tensor self) -> Tensor
at::Tensor _shape_as_tensor::call(const at::Tensor & self) {

    static auto op = create__shape_as_tensor_typed_handle();
    return op.call(self);
}

// aten::_shape_as_tensor(Tensor self) -> Tensor
at::Tensor _shape_as_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__shape_as_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dropout, name, "aten::dropout")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dropout, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dropout, schema_str, "dropout(Tensor input, float p, bool train) -> Tensor")

// aten::dropout(Tensor input, float p, bool train) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<dropout::schema> create_dropout_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dropout::name, dropout::overload_name)
      .typed<dropout::schema>();
}

// aten::dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor dropout::call(const at::Tensor & input, double p, bool train) {

    static auto op = create_dropout_typed_handle();
    return op.call(input, p, train);
}

// aten::dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) {

    static auto op = create_dropout_typed_handle();
    return op.redispatch(dispatchKeySet, input, p, train);
}
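
// The generated call() entry points are what the public functional API
// forwards to; at::dropout(input, p, train) bottoms out in the call above.
// An illustrative equivalence (sketch only):
//
//   at::Tensor x = at::randn({8, 16});
//   at::Tensor a = at::dropout(x, /*p=*/0.5, /*train=*/true);
//   at::Tensor b = at::_ops::dropout::call(x, 0.5, true);  // same dispatch path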

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dropout_, name, "aten::dropout_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dropout_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dropout_, schema_str, "dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)")

// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<dropout_::schema> create_dropout__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dropout_::name, dropout_::overload_name)
      .typed<dropout_::schema>();
}

// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & dropout_::call(at::Tensor & self, double p, bool train) {

    static auto op = create_dropout__typed_handle();
    return op.call(self, p, train);
}

// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & dropout_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) {

    static auto op = create_dropout__typed_handle();
    return op.redispatch(dispatchKeySet, self, p, train);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn, name, "aten::sgn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn, schema_str, "sgn(Tensor self) -> Tensor")

// aten::sgn(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sgn::schema> create_sgn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sgn::name, sgn::overload_name)
      .typed<sgn::schema>();
}

// aten::sgn(Tensor self) -> Tensor
at::Tensor sgn::call(const at::Tensor & self) {

    static auto op = create_sgn_typed_handle();
    return op.call(self);
}

// aten::sgn(Tensor self) -> Tensor
at::Tensor sgn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_sgn_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn_, name, "aten::sgn_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn_, schema_str, "sgn_(Tensor(a!) self) -> Tensor(a!)")

// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sgn_::schema> create_sgn__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sgn_::name, sgn_::overload_name)
      .typed<sgn_::schema>();
}

// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sgn_::call(at::Tensor & self) {

    static auto op = create_sgn__typed_handle();
    return op.call(self);
}

// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sgn_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_sgn__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn_out, name, "aten::sgn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sgn_out, schema_str, "sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sgn_out::schema> create_sgn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sgn_out::name, sgn_out::overload_name)
      .typed<sgn_out::schema>();
}

// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sgn_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sgn_out_typed_handle();
    return op.call(self, out);
}

// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sgn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sgn_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}
1249
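// Out-variant sketch (illustrative): sgn_out writes into a caller-provided
// tensor instead of allocating one, which is why out is taken by mutable
// reference and returned as Tensor&, per the (a!) annotation in the schema.
//
//   at::Tensor self = at::randn({8});
//   at::Tensor out = at::empty_like(self);
//   at::_ops::sgn_out::call(self, out);  // fills out with the sign of each element
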
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(real, name, "aten::real")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(real, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(real, schema_str, "real(Tensor(a) self) -> Tensor(a)")

// aten::real(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<real::schema> create_real_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(real::name, real::overload_name)
      .typed<real::schema>();
}

// aten::real(Tensor(a) self) -> Tensor(a)
at::Tensor real::call(const at::Tensor & self) {
    static auto op = create_real_typed_handle();
    return op.call(self);
}

// aten::real(Tensor(a) self) -> Tensor(a)
at::Tensor real::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_real_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj, name, "aten::_conj")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj, schema_str, "_conj(Tensor(a) self) -> Tensor(a)")

// aten::_conj(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_conj::schema> create__conj_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj::name, _conj::overload_name)
      .typed<_conj::schema>();
}

// aten::_conj(Tensor(a) self) -> Tensor(a)
at::Tensor _conj::call(const at::Tensor & self) {
    static auto op = create__conj_typed_handle();
    return op.call(self);
}

// aten::_conj(Tensor(a) self) -> Tensor(a)
at::Tensor _conj::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create__conj_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_physical, name, "aten::_conj_physical")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_physical, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_physical, schema_str, "_conj_physical(Tensor self) -> Tensor")

// aten::_conj_physical(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_conj_physical::schema> create__conj_physical_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj_physical::name, _conj_physical::overload_name)
      .typed<_conj_physical::schema>();
}

// aten::_conj_physical(Tensor self) -> Tensor
at::Tensor _conj_physical::call(const at::Tensor & self) {
    static auto op = create__conj_physical_typed_handle();
    return op.call(self);
}

// aten::_conj_physical(Tensor self) -> Tensor
at::Tensor _conj_physical::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create__conj_physical_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view, name, "aten::_neg_view")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view, schema_str, "_neg_view(Tensor(a) self) -> Tensor(a)")

// aten::_neg_view(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_neg_view::schema> create__neg_view_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_neg_view::name, _neg_view::overload_name)
      .typed<_neg_view::schema>();
}

// aten::_neg_view(Tensor(a) self) -> Tensor(a)
at::Tensor _neg_view::call(const at::Tensor & self) {
    static auto op = create__neg_view_typed_handle();
    return op.call(self);
}

// aten::_neg_view(Tensor(a) self) -> Tensor(a)
at::Tensor _neg_view::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create__neg_view_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool1d, name, "aten::avg_pool1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool1d, schema_str, "avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor")

// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool1d::schema> create_avg_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool1d::name, avg_pool1d::overload_name)
      .typed<avg_pool1d::schema>();
}

// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
at::Tensor avg_pool1d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
    static auto op = create_avg_pool1d_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
at::Tensor avg_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
    static auto op = create_avg_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool1d, name, "aten::adaptive_avg_pool1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool1d, schema_str, "adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor")

// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool1d::schema> create_adaptive_avg_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool1d::name, adaptive_avg_pool1d::overload_name)
      .typed<adaptive_avg_pool1d::schema>();
}

// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
at::Tensor adaptive_avg_pool1d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    static auto op = create_adaptive_avg_pool1d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
at::Tensor adaptive_avg_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    static auto op = create_adaptive_avg_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_is_all_true, name, "aten::_is_all_true")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_is_all_true, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_is_all_true, schema_str, "_is_all_true(Tensor self) -> Tensor")

// aten::_is_all_true(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_is_all_true::schema> create__is_all_true_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_is_all_true::name, _is_all_true::overload_name)
      .typed<_is_all_true::schema>();
}

// aten::_is_all_true(Tensor self) -> Tensor
at::Tensor _is_all_true::call(const at::Tensor & self) {
    static auto op = create__is_all_true_typed_handle();
    return op.call(self);
}

// aten::_is_all_true(Tensor self) -> Tensor
at::Tensor _is_all_true::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create__is_all_true_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_check_tensor, name, "aten::_test_check_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_check_tensor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_check_tensor, schema_str, "_test_check_tensor(Tensor self) -> Tensor")

// aten::_test_check_tensor(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_check_tensor::schema> create__test_check_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_check_tensor::name, _test_check_tensor::overload_name)
      .typed<_test_check_tensor::schema>();
}

// aten::_test_check_tensor(Tensor self) -> Tensor
at::Tensor _test_check_tensor::call(const at::Tensor & self) {
    static auto op = create__test_check_tensor_typed_handle();
    return op.call(self);
}

// aten::_test_check_tensor(Tensor self) -> Tensor
at::Tensor _test_check_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create__test_check_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(allclose, name, "aten::allclose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(allclose, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(allclose, schema_str, "allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool")

// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<allclose::schema> create_allclose_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(allclose::name, allclose::overload_name)
      .typed<allclose::schema>();
}

// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
bool allclose::call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    static auto op = create_allclose_typed_handle();
    return op.call(self, other, rtol, atol, equal_nan);
}

// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
bool allclose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    static auto op = create_allclose_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rtol, atol, equal_nan);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmax, name, "aten::argmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmax, schema_str, "argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor")

// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<argmax::schema> create_argmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argmax::name, argmax::overload_name)
      .typed<argmax::schema>();
}

// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
at::Tensor argmax::call(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
    static auto op = create_argmax_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
at::Tensor argmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
    static auto op = create_argmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmax_out, name, "aten::argmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmax_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmax_out, schema_str, "argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<argmax_out::schema> create_argmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argmax_out::name, argmax_out::overload_name)
      .typed<argmax_out::schema>();
}

// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argmax_out::call(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
    static auto op = create_argmax_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
    static auto op = create_argmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

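// Sketch of the optional-dim convention above (illustrative): passing
// c10::nullopt for dim reduces over the flattened tensor, mirroring the
// int? dim=None default in the schema.
//
//   at::Tensor m = at::randn({3, 5});
//   at::Tensor flat_idx = at::_ops::argmax::call(m, c10::nullopt, /*keepdim=*/false);
//   at::Tensor row_idx  = at::_ops::argmax::call(m, /*dim=*/1, /*keepdim=*/false);
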
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh, name, "aten::acosh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh, schema_str, "acosh(Tensor self) -> Tensor")

// aten::acosh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<acosh::schema> create_acosh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acosh::name, acosh::overload_name)
      .typed<acosh::schema>();
}

// aten::acosh(Tensor self) -> Tensor
at::Tensor acosh::call(const at::Tensor & self) {
    static auto op = create_acosh_typed_handle();
    return op.call(self);
}

// aten::acosh(Tensor self) -> Tensor
at::Tensor acosh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_acosh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh_, name, "aten::acosh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh_, schema_str, "acosh_(Tensor(a!) self) -> Tensor(a!)")

// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<acosh_::schema> create_acosh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acosh_::name, acosh_::overload_name)
      .typed<acosh_::schema>();
}

// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & acosh_::call(at::Tensor & self) {
    static auto op = create_acosh__typed_handle();
    return op.call(self);
}

// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & acosh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_acosh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh_out, name, "aten::acosh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(acosh_out, schema_str, "acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<acosh_out::schema> create_acosh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acosh_out::name, acosh_out::overload_name)
      .typed<acosh_out::schema>();
}

// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & acosh_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_acosh_out_typed_handle();
    return op.call(self, out);
}

// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & acosh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_acosh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh, name, "aten::arctanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh, schema_str, "arctanh(Tensor self) -> Tensor")

// aten::arctanh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arctanh::schema> create_arctanh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctanh::name, arctanh::overload_name)
      .typed<arctanh::schema>();
}

// aten::arctanh(Tensor self) -> Tensor
at::Tensor arctanh::call(const at::Tensor & self) {
    static auto op = create_arctanh_typed_handle();
    return op.call(self);
}

// aten::arctanh(Tensor self) -> Tensor
at::Tensor arctanh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_arctanh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh_, name, "aten::arctanh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh_, schema_str, "arctanh_(Tensor(a!) self) -> Tensor(a!)")

// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctanh_::schema> create_arctanh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctanh_::name, arctanh_::overload_name)
      .typed<arctanh_::schema>();
}

// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arctanh_::call(at::Tensor & self) {
    static auto op = create_arctanh__typed_handle();
    return op.call(self);
}

// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arctanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_arctanh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh_out, name, "aten::arctanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctanh_out, schema_str, "arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctanh_out::schema> create_arctanh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctanh_out::name, arctanh_out::overload_name)
      .typed<arctanh_out::schema>();
}

// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctanh_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_arctanh_out_typed_handle();
    return op.call(self, out);
}

// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_arctanh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided, name, "aten::as_strided")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided, schema_str, "as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)")

// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<as_strided::schema> create_as_strided_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided::name, as_strided::overload_name)
      .typed<as_strided::schema>();
}

// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
at::Tensor as_strided::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
    static auto op = create_as_strided_typed_handle();
    return op.call(self, size, stride, storage_offset);
}

// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
at::Tensor as_strided::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
    static auto op = create_as_strided_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, storage_offset);
}

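// Note on the SymInt signatures above (editorial): size and stride arrive as
// c10::SymIntArrayRef so symbolic shapes can flow through tracing; concrete
// integer lists are most easily supplied through the public at::as_strided
// wrapper. A concrete-shape sketch (illustrative):
//
//   at::Tensor base = at::arange(10);
//   // View the first six elements as a 2x3 matrix with row stride 3:
//   at::Tensor v = at::as_strided(base, {2, 3}, {3, 1});
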
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_, name, "aten::as_strided_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_, schema_str, "as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)")

// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_::schema> create_as_strided__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_::name, as_strided_::overload_name)
      .typed<as_strided_::schema>();
}

// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
const at::Tensor & as_strided_::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
    static auto op = create_as_strided__typed_handle();
    return op.call(self, size, stride, storage_offset);
}

// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
const at::Tensor & as_strided_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
    static auto op = create_as_strided__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, storage_offset);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_3d, name, "aten::atleast_3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_3d, schema_str, "atleast_3d(Tensor self) -> Tensor")

// aten::atleast_3d(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atleast_3d::schema> create_atleast_3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atleast_3d::name, atleast_3d::overload_name)
      .typed<atleast_3d::schema>();
}

// aten::atleast_3d(Tensor self) -> Tensor
at::Tensor atleast_3d::call(const at::Tensor & self) {
    static auto op = create_atleast_3d_typed_handle();
    return op.call(self);
}

// aten::atleast_3d(Tensor self) -> Tensor
at::Tensor atleast_3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_atleast_3d_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_3d_Sequence, name, "aten::atleast_3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_3d_Sequence, overload_name, "Sequence")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atleast_3d_Sequence, schema_str, "atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]")

// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<atleast_3d_Sequence::schema> create_atleast_3d_Sequence_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atleast_3d_Sequence::name, atleast_3d_Sequence::overload_name)
      .typed<atleast_3d_Sequence::schema>();
}

// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_3d_Sequence::call(at::TensorList tensors) {
    static auto op = create_atleast_3d_Sequence_typed_handle();
    return op.call(tensors);
}

// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_3d_Sequence::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    static auto op = create_atleast_3d_Sequence_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_batch_norm_impl_index, name, "aten::_batch_norm_impl_index")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_batch_norm_impl_index, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_batch_norm_impl_index, schema_str, "_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)")

// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_impl_index::schema> create__batch_norm_impl_index_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_batch_norm_impl_index::name, _batch_norm_impl_index::overload_name)
      .typed<_batch_norm_impl_index::schema>();
}

// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    static auto op = create__batch_norm_impl_index_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}

// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    static auto op = create__batch_norm_impl_index_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_batch_norm_impl_index_backward, name, "aten::_batch_norm_impl_index_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_batch_norm_impl_index_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_batch_norm_impl_index_backward, schema_str, "_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)")

// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_impl_index_backward::schema> create__batch_norm_impl_index_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_batch_norm_impl_index_backward::name, _batch_norm_impl_index_backward::overload_name)
      .typed<_batch_norm_impl_index_backward::schema>();
}

// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward::call(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
    static auto op = create__batch_norm_impl_index_backward_typed_handle();
    return op.call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
}

// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
    static auto op = create__batch_norm_impl_index_backward_typed_handle();
    return op.redispatch(dispatchKeySet, impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or, name, "aten::logical_or")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or, schema_str, "logical_or(Tensor self, Tensor other) -> Tensor")

// aten::logical_or(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logical_or::schema> create_logical_or_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_or::name, logical_or::overload_name)
      .typed<logical_or::schema>();
}

// aten::logical_or(Tensor self, Tensor other) -> Tensor
at::Tensor logical_or::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_logical_or_typed_handle();
    return op.call(self, other);
}

// aten::logical_or(Tensor self, Tensor other) -> Tensor
at::Tensor logical_or::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_logical_or_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or_, name, "aten::logical_or_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or_, schema_str, "logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_or_::schema> create_logical_or__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_or_::name, logical_or_::overload_name)
      .typed<logical_or_::schema>();
}

// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_or_::call(at::Tensor & self, const at::Tensor & other) {
    static auto op = create_logical_or__typed_handle();
    return op.call(self, other);
}

// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_or_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto op = create_logical_or__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or_out, name, "aten::logical_or")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_or_out, schema_str, "logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_or_out::schema> create_logical_or_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_or_out::name, logical_or_out::overload_name)
      .typed<logical_or_out::schema>();
}

// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_or_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_logical_or_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_or_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_logical_or_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window, name, "aten::blackman_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window, schema_str, "blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<blackman_window::schema> create_blackman_window_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(blackman_window::name, blackman_window::overload_name)
      .typed<blackman_window::schema>();
}

// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor blackman_window::call(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_blackman_window_typed_handle();
    return op.call(window_length, dtype, layout, device, pin_memory);
}

// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor blackman_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_blackman_window_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_periodic, name, "aten::blackman_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_periodic, overload_name, "periodic")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_periodic, schema_str, "blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<blackman_window_periodic::schema> create_blackman_window_periodic_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(blackman_window_periodic::name, blackman_window_periodic::overload_name)
      .typed<blackman_window_periodic::schema>();
}

// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor blackman_window_periodic::call(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_blackman_window_periodic_typed_handle();
    return op.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor blackman_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_blackman_window_periodic_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

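// Factory-argument sketch (illustrative): the dtype/layout/device/pin_memory
// optionals above are the exploded form of TensorOptions; the public
// at::blackman_window wrappers accept a TensorOptions and unpack it into
// these four arguments before reaching call().
//
//   at::Tensor w = at::_ops::blackman_window::call(
//       /*window_length=*/128, at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);
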
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(broadcast_tensors, name, "aten::broadcast_tensors")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(broadcast_tensors, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(broadcast_tensors, schema_str, "broadcast_tensors(Tensor[] tensors) -> Tensor[]")

// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<broadcast_tensors::schema> create_broadcast_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(broadcast_tensors::name, broadcast_tensors::overload_name)
      .typed<broadcast_tensors::schema>();
}

// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> broadcast_tensors::call(at::TensorList tensors) {
    static auto op = create_broadcast_tensors_typed_handle();
    return op.call(tensors);
}

// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> broadcast_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    static auto op = create_broadcast_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat, name, "aten::cat")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat, schema_str, "cat(Tensor[] tensors, int dim=0) -> Tensor")

// aten::cat(Tensor[] tensors, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cat::schema> create_cat_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cat::name, cat::overload_name)
      .typed<cat::schema>();
}

// aten::cat(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor cat::call(const at::ITensorListRef & tensors, int64_t dim) {
    static auto op = create_cat_typed_handle();
    return op.call(tensors, dim);
}

// aten::cat(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor cat::redispatch(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim) {
    static auto op = create_cat_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_out, name, "aten::cat")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_out, schema_str, "cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cat_out::schema> create_cat_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cat_out::name, cat_out::overload_name)
      .typed<cat_out::schema>();
}

// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cat_out::call(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
    static auto op = create_cat_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cat_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
    static auto op = create_cat_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

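// Note on ITensorListRef (editorial): cat takes at::ITensorListRef rather than
// at::TensorList so it can accept both boxed (c10::List) and unboxed (ArrayRef)
// tensor lists without materializing a vector. A plain brace list still works
// through the public wrapper (illustrative):
//
//   at::Tensor a = at::ones({2, 3});
//   at::Tensor b = at::zeros({2, 3});
//   at::Tensor c = at::cat({a, b}, /*dim=*/0);  // shape [4, 3]
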
2025STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_names, name, "aten::cat")
2026STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_names, overload_name, "names")
2027STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_names, schema_str, "cat.names(Tensor[] tensors, Dimname dim) -> Tensor")
2028
2029// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
2030static C10_NOINLINE c10::TypedOperatorHandle<cat_names::schema> create_cat_names_typed_handle() {
2031 return c10::Dispatcher::singleton()
2032 .findSchemaOrThrow(cat_names::name, cat_names::overload_name)
2033 .typed<cat_names::schema>();
2034}
2035
2036// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
2037at::Tensor cat_names::call(at::TensorList tensors, at::Dimname dim) {
2038
2039 static auto op = create_cat_names_typed_handle();
2040 return op.call(tensors, dim);
2041}
2042
2043// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
2044at::Tensor cat_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) {
2045
2046 static auto op = create_cat_names_typed_handle();
2047 return op.redispatch(dispatchKeySet, tensors, dim);
2048}
2049
2050STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_names_out, name, "aten::cat")
2051STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_names_out, overload_name, "names_out")
2052STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cat_names_out, schema_str, "cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)")
2053
2054// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
2055static C10_NOINLINE c10::TypedOperatorHandle<cat_names_out::schema> create_cat_names_out_typed_handle() {
2056 return c10::Dispatcher::singleton()
2057 .findSchemaOrThrow(cat_names_out::name, cat_names_out::overload_name)
2058 .typed<cat_names_out::schema>();
2059}
2060
2061// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
2062at::Tensor & cat_names_out::call(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
2063
2064 static auto op = create_cat_names_out_typed_handle();
2065 return op.call(tensors, dim, out);
2066}
2067
2068// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
2069at::Tensor & cat_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
2070
2071 static auto op = create_cat_names_out_typed_handle();
2072 return op.redispatch(dispatchKeySet, tensors, dim, out);
2073}
2074
2075STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution, name, "aten::convolution")
2076STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution, overload_name, "")
2077STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution, schema_str, "convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor")
2078
2079// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
2080static C10_NOINLINE c10::TypedOperatorHandle<convolution::schema> create_convolution_typed_handle() {
2081 return c10::Dispatcher::singleton()
2082 .findSchemaOrThrow(convolution::name, convolution::overload_name)
2083 .typed<convolution::schema>();
2084}
2085
2086// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
2087at::Tensor convolution::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
2088
2089 static auto op = create_convolution_typed_handle();
2090 return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
2091}
2092
2093// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
2094at::Tensor convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
2095
2096 static auto op = create_convolution_typed_handle();
2097 return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
2098}
2099
2100STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_overrideable, name, "aten::convolution_backward_overrideable")
2101STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_overrideable, overload_name, "")
2102STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_overrideable, schema_str, "convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)")
2103
2104// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
2105static C10_NOINLINE c10::TypedOperatorHandle<convolution_backward_overrideable::schema> create_convolution_backward_overrideable_typed_handle() {
2106 return c10::Dispatcher::singleton()
2107 .findSchemaOrThrow(convolution_backward_overrideable::name, convolution_backward_overrideable::overload_name)
2108 .typed<convolution_backward_overrideable::schema>();
2109}
2110
2111// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
2112::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
2113
2114 static auto op = create_convolution_backward_overrideable_typed_handle();
2115 return op.call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
2116}
2117
2118// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
2119::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
2120
2121 static auto op = create_convolution_backward_overrideable_typed_handle();
2122 return op.redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
2123}
2124
2125STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution, name, "aten::_convolution")
2126STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution, overload_name, "")
2127STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution, schema_str, "_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor")
2128
2129// aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
2130static C10_NOINLINE c10::TypedOperatorHandle<_convolution::schema> create__convolution_typed_handle() {
2131 return c10::Dispatcher::singleton()
2132 .findSchemaOrThrow(_convolution::name, _convolution::overload_name)
2133 .typed<_convolution::schema>();
2134}
2135
2136// aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
2137at::Tensor _convolution::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
2138
2139 static auto op = create__convolution_typed_handle();
2140 return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
2141}
2142
2143// aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
2144at::Tensor _convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
2145
2146 static auto op = create__convolution_typed_handle();
2147 return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
2148}
2149
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_deprecated, name, "aten::_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_deprecated, overload_name, "deprecated")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_deprecated, schema_str, "_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor")

// aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_convolution_deprecated::schema> create__convolution_deprecated_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convolution_deprecated::name, _convolution_deprecated::overload_name)
      .typed<_convolution_deprecated::schema>();
}

// aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
at::Tensor _convolution_deprecated::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  static auto op = create__convolution_deprecated_typed_handle();
  return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
}

// aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
at::Tensor _convolution_deprecated::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  static auto op = create__convolution_deprecated_typed_handle();
  return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_transpose1d, name, "aten::conv_transpose1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_transpose1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_transpose1d, schema_str, "conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor")

// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv_transpose1d::schema> create_conv_transpose1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv_transpose1d::name, conv_transpose1d::overload_name)
      .typed<conv_transpose1d::schema>();
}

// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor
at::Tensor conv_transpose1d::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  static auto op = create_conv_transpose1d_typed_handle();
  return op.call(input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor
at::Tensor conv_transpose1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  static auto op = create_conv_transpose1d_typed_handle();
  return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos, name, "aten::cos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos, schema_str, "cos(Tensor self) -> Tensor")

// aten::cos(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cos::schema> create_cos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cos::name, cos::overload_name)
      .typed<cos::schema>();
}

// aten::cos(Tensor self) -> Tensor
at::Tensor cos::call(const at::Tensor & self) {
  static auto op = create_cos_typed_handle();
  return op.call(self);
}

// aten::cos(Tensor self) -> Tensor
at::Tensor cos::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  static auto op = create_cos_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

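// Usage sketch (editorial, not generated): the unboxed call() above is the
// entry point the public at::cos wrapper forwards to. The hypothetical helper
// below shows the equivalence.
static inline at::Tensor example_cos(const at::Tensor & t) {
  // Resolves the typed handle once (function-local static), then dispatches;
  // behaviorally identical to at::cos(t).
  return cos::call(t);
}
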
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos_, name, "aten::cos_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos_, schema_str, "cos_(Tensor(a!) self) -> Tensor(a!)")

// aten::cos_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cos_::schema> create_cos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cos_::name, cos_::overload_name)
      .typed<cos_::schema>();
}

// aten::cos_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & cos_::call(at::Tensor & self) {
  static auto op = create_cos__typed_handle();
  return op.call(self);
}

// aten::cos_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & cos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
  static auto op = create_cos__typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos_out, name, "aten::cos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cos_out, schema_str, "cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cos_out::schema> create_cos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cos_out::name, cos_out::overload_name)
      .typed<cos_out::schema>();
}

// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cos_out::call(const at::Tensor & self, at::Tensor & out) {
  static auto op = create_cos_out_typed_handle();
  return op.call(self, out);
}

// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cos_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  static auto op = create_cos_out_typed_handle();
  return op.redispatch(dispatchKeySet, self, out);
}

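// Usage sketch (editorial, not generated): out-variants keep the schema
// argument order, with the destination tensor trailing, and return it by
// reference; the public at::cos_outf wrapper matches this order, while
// at::cos_out takes `out` first. Hypothetical helper:
static inline at::Tensor & example_cos_out(const at::Tensor & t, at::Tensor & buf) {
  return cos_out::call(t, buf);  // writes cos(t) into buf and returns buf
}
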
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator, name, "aten::cudnn_affine_grid_generator")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator, schema_str, "cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid")

// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_affine_grid_generator::schema> create_cudnn_affine_grid_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_affine_grid_generator::name, cudnn_affine_grid_generator::overload_name)
      .typed<cudnn_affine_grid_generator::schema>();
}

// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
at::Tensor cudnn_affine_grid_generator::call(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
  static auto op = create_cudnn_affine_grid_generator_typed_handle();
  return op.call(theta, N, C, H, W);
}

// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
at::Tensor cudnn_affine_grid_generator::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
  static auto op = create_cudnn_affine_grid_generator_typed_handle();
  return op.redispatch(dispatchKeySet, theta, N, C, H, W);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_backward, name, "aten::cudnn_batch_norm_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_backward, schema_str, "cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)")

// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_batch_norm_backward::schema> create_cudnn_batch_norm_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_batch_norm_backward::name, cudnn_batch_norm_backward::overload_name)
      .typed<cudnn_batch_norm_backward::schema>();
}

// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward::call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
  static auto op = create_cudnn_batch_norm_backward_typed_handle();
  return op.call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
}

// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
  static auto op = create_cudnn_batch_norm_backward_typed_handle();
  return op.redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_transpose, name, "aten::cudnn_convolution_transpose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_transpose, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_transpose, schema_str, "cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor")

// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_transpose::schema> create_cudnn_convolution_transpose_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_transpose::name, cudnn_convolution_transpose::overload_name)
      .typed<cudnn_convolution_transpose::schema>();
}

// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
at::Tensor cudnn_convolution_transpose::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
  static auto op = create_cudnn_convolution_transpose_typed_handle();
  return op.call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
}

// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
at::Tensor cudnn_convolution_transpose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
  static auto op = create_cudnn_convolution_transpose_typed_handle();
  return op.redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_backward, name, "aten::cudnn_grid_sampler_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_backward, schema_str, "cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)")

// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_grid_sampler_backward::schema> create_cudnn_grid_sampler_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_grid_sampler_backward::name, cudnn_grid_sampler_backward::overload_name)
      .typed<cudnn_grid_sampler_backward::schema>();
}

// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward::call(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
  static auto op = create_cudnn_grid_sampler_backward_typed_handle();
  return op.call(self, grid, grad_output);
}

// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
  static auto op = create_cudnn_grid_sampler_backward_typed_handle();
  return op.redispatch(dispatchKeySet, self, grid, grad_output);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum, name, "aten::cumsum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum, schema_str, "cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor")

// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumsum::schema> create_cumsum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumsum::name, cumsum::overload_name)
      .typed<cumsum::schema>();
}

// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumsum::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  static auto op = create_cumsum_typed_handle();
  return op.call(self, dim, dtype);
}

// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumsum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  static auto op = create_cumsum_typed_handle();
  return op.redispatch(dispatchKeySet, self, dim, dtype);
}

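// Usage sketch (editorial, not generated): the optional dtype lets callers
// accumulate in a wider type than the input, e.g. summing a float16 tensor in
// float32 to limit rounding error. Hypothetical helper:
static inline at::Tensor example_cumsum_fp32(const at::Tensor & t) {
  // at::kFloat converts implicitly to c10::optional<at::ScalarType>
  return cumsum::call(t, /*dim=*/0, /*dtype=*/at::kFloat);
}
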
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_, name, "aten::cumsum_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_, schema_str, "cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)")

// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumsum_::schema> create_cumsum__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumsum_::name, cumsum_::overload_name)
      .typed<cumsum_::schema>();
}

// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumsum_::call(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  static auto op = create_cumsum__typed_handle();
  return op.call(self, dim, dtype);
}

// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumsum_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  static auto op = create_cumsum__typed_handle();
  return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_out, name, "aten::cumsum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_out, schema_str, "cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumsum_out::schema> create_cumsum_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumsum_out::name, cumsum_out::overload_name)
      .typed<cumsum_out::schema>();
}

// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumsum_out::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  static auto op = create_cumsum_out_typed_handle();
  return op.call(self, dim, dtype, out);
}

// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumsum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  static auto op = create_cumsum_out_typed_handle();
  return op.redispatch(dispatchKeySet, self, dim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_dimname, name, "aten::cumsum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_dimname, schema_str, "cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor")

// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumsum_dimname::schema> create_cumsum_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumsum_dimname::name, cumsum_dimname::overload_name)
      .typed<cumsum_dimname::schema>();
}

// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumsum_dimname::call(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  static auto op = create_cumsum_dimname_typed_handle();
  return op.call(self, dim, dtype);
}

// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumsum_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  static auto op = create_cumsum_dimname_typed_handle();
  return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum__dimname, name, "aten::cumsum_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum__dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum__dimname, schema_str, "cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)")

// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumsum__dimname::schema> create_cumsum__dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumsum__dimname::name, cumsum__dimname::overload_name)
      .typed<cumsum__dimname::schema>();
}

// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumsum__dimname::call(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  static auto op = create_cumsum__dimname_typed_handle();
  return op.call(self, dim, dtype);
}

// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumsum__dimname::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  static auto op = create_cumsum__dimname_typed_handle();
  return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_dimname_out, name, "aten::cumsum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumsum_dimname_out, schema_str, "cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumsum_dimname_out::schema> create_cumsum_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumsum_dimname_out::name, cumsum_dimname_out::overload_name)
      .typed<cumsum_dimname_out::schema>();
}

// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumsum_dimname_out::call(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  static auto op = create_cumsum_dimname_out_typed_handle();
  return op.call(self, dim, dtype, out);
}

// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumsum_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  static auto op = create_cumsum_dimname_out_typed_handle();
  return op.redispatch(dispatchKeySet, self, dim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss, name, "aten::_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss, schema_str, "_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)")

// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss::schema> create__ctc_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_ctc_loss::name, _ctc_loss::overload_name)
      .typed<_ctc_loss::schema>();
}

// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _ctc_loss::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
  static auto op = create__ctc_loss_typed_handle();
  return op.call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}

// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _ctc_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
  static auto op = create__ctc_loss_typed_handle();
  return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}

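// Usage sketch (editorial, not generated): _ctc_loss returns a pair; the first
// tensor is the per-batch loss, and the second is intermediate state kept for
// the backward pass (an assumption about its role; callers that only want the
// loss can discard it). `example_ctc_loss_value` is a hypothetical helper.
static inline at::Tensor example_ctc_loss_value(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths) {
  auto result = _ctc_loss::call(log_probs, targets, input_lengths, target_lengths,
                                /*blank=*/0, /*zero_infinity=*/false);
  return std::get<0>(result);
}
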
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_Tensor, name, "aten::_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_Tensor, schema_str, "_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)")

// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_Tensor::schema> create__ctc_loss_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_ctc_loss_Tensor::name, _ctc_loss_Tensor::overload_name)
      .typed<_ctc_loss_Tensor::schema>();
}

// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
  static auto op = create__ctc_loss_Tensor_typed_handle();
  return op.call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}

// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
  static auto op = create__ctc_loss_Tensor_typed_handle();
  return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagflat, name, "aten::diagflat")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagflat, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagflat, schema_str, "diagflat(Tensor self, int offset=0) -> Tensor")

// aten::diagflat(Tensor self, int offset=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diagflat::schema> create_diagflat_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagflat::name, diagflat::overload_name)
      .typed<diagflat::schema>();
}

// aten::diagflat(Tensor self, int offset=0) -> Tensor
at::Tensor diagflat::call(const at::Tensor & self, int64_t offset) {
  static auto op = create_diagflat_typed_handle();
  return op.call(self, offset);
}

// aten::diagflat(Tensor self, int offset=0) -> Tensor
at::Tensor diagflat::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset) {
  static auto op = create_diagflat_typed_handle();
  return op.redispatch(dispatchKeySet, self, offset);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_diagonal, name, "aten::linalg_diagonal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_diagonal, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_diagonal, schema_str, "linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)")

// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_diagonal::schema> create_linalg_diagonal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_diagonal::name, linalg_diagonal::overload_name)
      .typed<linalg_diagonal::schema>();
}

// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
at::Tensor linalg_diagonal::call(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
  static auto op = create_linalg_diagonal_typed_handle();
  return op.call(A, offset, dim1, dim2);
}

// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
at::Tensor linalg_diagonal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
  static auto op = create_linalg_diagonal_typed_handle();
  return op.redispatch(dispatchKeySet, A, offset, dim1, dim2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_Tensor, name, "aten::true_divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_Tensor, schema_str, "true_divide.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<true_divide_Tensor::schema> create_true_divide_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide_Tensor::name, true_divide_Tensor::overload_name)
      .typed<true_divide_Tensor::schema>();
}

// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor true_divide_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
  static auto op = create_true_divide_Tensor_typed_handle();
  return op.call(self, other);
}

// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor true_divide_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto op = create_true_divide_Tensor_typed_handle();
  return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide__Tensor, name, "aten::true_divide_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide__Tensor, schema_str, "true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<true_divide__Tensor::schema> create_true_divide__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide__Tensor::name, true_divide__Tensor::overload_name)
      .typed<true_divide__Tensor::schema>();
}

// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & true_divide__Tensor::call(at::Tensor & self, const at::Tensor & other) {
  static auto op = create_true_divide__Tensor_typed_handle();
  return op.call(self, other);
}

// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & true_divide__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
  static auto op = create_true_divide__Tensor_typed_handle();
  return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_out, name, "aten::true_divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_out, schema_str, "true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<true_divide_out::schema> create_true_divide_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide_out::name, true_divide_out::overload_name)
      .typed<true_divide_out::schema>();
}

// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & true_divide_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto op = create_true_divide_out_typed_handle();
  return op.call(self, other, out);
}

// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & true_divide_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto op = create_true_divide_out_typed_handle();
  return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_Scalar, name, "aten::true_divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide_Scalar, schema_str, "true_divide.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<true_divide_Scalar::schema> create_true_divide_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide_Scalar::name, true_divide_Scalar::overload_name)
      .typed<true_divide_Scalar::schema>();
}

// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor true_divide_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
  static auto op = create_true_divide_Scalar_typed_handle();
  return op.call(self, other);
}

// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor true_divide_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
  static auto op = create_true_divide_Scalar_typed_handle();
  return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide__Scalar, name, "aten::true_divide_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(true_divide__Scalar, schema_str, "true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<true_divide__Scalar::schema> create_true_divide__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide__Scalar::name, true_divide__Scalar::overload_name)
      .typed<true_divide__Scalar::schema>();
}

// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & true_divide__Scalar::call(at::Tensor & self, const at::Scalar & other) {
  static auto op = create_true_divide__Scalar_typed_handle();
  return op.call(self, other);
}

// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & true_divide__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
  static auto op = create_true_divide__Scalar_typed_handle();
  return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vdot, name, "aten::vdot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vdot, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vdot, schema_str, "vdot(Tensor self, Tensor other) -> Tensor")

// aten::vdot(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<vdot::schema> create_vdot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vdot::name, vdot::overload_name)
      .typed<vdot::schema>();
}

// aten::vdot(Tensor self, Tensor other) -> Tensor
at::Tensor vdot::call(const at::Tensor & self, const at::Tensor & other) {
  static auto op = create_vdot_typed_handle();
  return op.call(self, other);
}

// aten::vdot(Tensor self, Tensor other) -> Tensor
at::Tensor vdot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto op = create_vdot_typed_handle();
  return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vdot_out, name, "aten::vdot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vdot_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vdot_out, schema_str, "vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<vdot_out::schema> create_vdot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vdot_out::name, vdot_out::overload_name)
      .typed<vdot_out::schema>();
}

// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & vdot_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto op = create_vdot_out_typed_handle();
  return op.call(self, other, out);
}

// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & vdot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto op = create_vdot_out_typed_handle();
  return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_backward, name, "aten::embedding_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_backward, schema_str, "embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor")

// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<embedding_backward::schema> create_embedding_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_backward::name, embedding_backward::overload_name)
      .typed<embedding_backward::schema>();
}

// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
at::Tensor embedding_backward::call(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  static auto op = create_embedding_backward_typed_handle();
  return op.call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}

// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
at::Tensor embedding_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  static auto op = create_embedding_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}

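// Usage sketch (editorial, not generated): num_weights and padding_idx are
// SymInt so the shape can stay symbolic under tracing/export; in eager code a
// plain int64_t converts implicitly. Hypothetical helper:
static inline at::Tensor example_embedding_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights) {
  // int64_t -> c10::SymInt conversion happens at the call boundary
  return embedding_backward::call(grad, indices, num_weights, /*padding_idx=*/-1,
                                  /*scale_grad_by_freq=*/false, /*sparse=*/false);
}
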
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_dense_backward, name, "aten::embedding_dense_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_dense_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_dense_backward, schema_str, "embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor")

// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<embedding_dense_backward::schema> create_embedding_dense_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_dense_backward::name, embedding_dense_backward::overload_name)
      .typed<embedding_dense_backward::schema>();
}

// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
at::Tensor embedding_dense_backward::call(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
  static auto op = create_embedding_dense_backward_typed_handle();
  return op.call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
}

// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
at::Tensor embedding_dense_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
  static auto op = create_embedding_dense_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag, name, "aten::_embedding_bag")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag, schema_str, "_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)")

// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag::schema> create__embedding_bag_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag::name, _embedding_bag::overload_name)
      .typed<_embedding_bag::schema>();
}

// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
  static auto op = create__embedding_bag_typed_handle();
  return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
  static auto op = create__embedding_bag_typed_handle();
  return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_sparse_backward, name, "aten::_embedding_bag_sparse_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_sparse_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_sparse_backward, schema_str, "_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor")

// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_sparse_backward::schema> create__embedding_bag_sparse_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_sparse_backward::name, _embedding_bag_sparse_backward::overload_name)
      .typed<_embedding_bag_sparse_backward::schema>();
}

// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_sparse_backward::call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  static auto op = create__embedding_bag_sparse_backward_typed_handle();
  return op.call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_sparse_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  static auto op = create__embedding_bag_sparse_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_empty, name, "aten::new_empty")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_empty, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_empty, schema_str, "new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<new_empty::schema> create_new_empty_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_empty::name, new_empty::overload_name)
      .typed<new_empty::schema>();
}

// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_empty::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  static auto op = create_new_empty_typed_handle();
  return op.call(self, size, dtype, layout, device, pin_memory);
}

// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_empty::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  static auto op = create_new_empty_typed_handle();
  return op.redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory);
}

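// Usage sketch (editorial, not generated): the four trailing optionals are the
// flattened TensorOptions; leaving them all nullopt inherits dtype, layout and
// device from `self`, which is the point of the new_* factory family.
static inline at::Tensor example_new_empty(const at::Tensor & self) {
  std::vector<c10::SymInt> size{2, 3};  // concrete sizes; symbolic under tracing
  return new_empty::call(self, size, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
}
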
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1, name, "aten::expm1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1, schema_str, "expm1(Tensor self) -> Tensor")

// aten::expm1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<expm1::schema> create_expm1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expm1::name, expm1::overload_name)
      .typed<expm1::schema>();
}

// aten::expm1(Tensor self) -> Tensor
at::Tensor expm1::call(const at::Tensor & self) {
  static auto op = create_expm1_typed_handle();
  return op.call(self);
}

// aten::expm1(Tensor self) -> Tensor
at::Tensor expm1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  static auto op = create_expm1_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1_, name, "aten::expm1_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1_, schema_str, "expm1_(Tensor(a!) self) -> Tensor(a!)")

// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<expm1_::schema> create_expm1__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expm1_::name, expm1_::overload_name)
      .typed<expm1_::schema>();
}

// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & expm1_::call(at::Tensor & self) {
  static auto op = create_expm1__typed_handle();
  return op.call(self);
}

// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & expm1_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
  static auto op = create_expm1__typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1_out, name, "aten::expm1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expm1_out, schema_str, "expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<expm1_out::schema> create_expm1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expm1_out::name, expm1_out::overload_name)
      .typed<expm1_out::schema>();
}

// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & expm1_out::call(const at::Tensor & self, at::Tensor & out) {
  static auto op = create_expm1_out_typed_handle();
  return op.call(self, out);
}

// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & expm1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  static auto op = create_expm1_out_typed_handle();
  return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_as, name, "aten::expand_as")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_as, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_as, schema_str, "expand_as(Tensor(a) self, Tensor other) -> Tensor(a)")

// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<expand_as::schema> create_expand_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expand_as::name, expand_as::overload_name)
      .typed<expand_as::schema>();
}

// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor expand_as::call(const at::Tensor & self, const at::Tensor & other) {
  static auto op = create_expand_as_typed_handle();
  return op.call(self, other);
}

// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor expand_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto op = create_expand_as_typed_handle();
  return op.redispatch(dispatchKeySet, self, other);
}

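// Usage sketch (editorial, not generated): the Tensor(a) annotations in the
// schema above mark the result as aliasing `self`: expand_as returns a view
// whose broadcast dimensions have stride 0, so no data is copied.
static inline at::Tensor example_expand_as(const at::Tensor & self, const at::Tensor & other) {
  return expand_as::call(self, other);  // view of self with other's shape
}
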
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unflatten_int, name, "aten::unflatten")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unflatten_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unflatten_int, schema_str, "unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)")

// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<unflatten_int::schema> create_unflatten_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unflatten_int::name, unflatten_int::overload_name)
      .typed<unflatten_int::schema>();
}

// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
at::Tensor unflatten_int::call(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
  static auto op = create_unflatten_int_typed_handle();
  return op.call(self, dim, sizes);
}

// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
at::Tensor unflatten_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
  static auto op = create_unflatten_int_typed_handle();
  return op.redispatch(dispatchKeySet, self, dim, sizes);
}

3050STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unflatten_Dimname, name, "aten::unflatten")
3051STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unflatten_Dimname, overload_name, "Dimname")
3052STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unflatten_Dimname, schema_str, "unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)")
3053
3054// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
3055static C10_NOINLINE c10::TypedOperatorHandle<unflatten_Dimname::schema> create_unflatten_Dimname_typed_handle() {
3056 return c10::Dispatcher::singleton()
3057 .findSchemaOrThrow(unflatten_Dimname::name, unflatten_Dimname::overload_name)
3058 .typed<unflatten_Dimname::schema>();
3059}
3060
3061// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
3062at::Tensor unflatten_Dimname::call(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
3063
3064 static auto op = create_unflatten_Dimname_typed_handle();
3065 return op.call(self, dim, sizes, names);
3066}
3067
3068// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
3069at::Tensor unflatten_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
3070
3071 static auto op = create_unflatten_Dimname_typed_handle();
3072 return op.redispatch(dispatchKeySet, self, dim, sizes, names);
3073}
3074
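// Usage sketch (illustrative, assuming <ATen/ATen.h>): unflatten.int splits
// one dimension into the given sizes, the inverse of flattening over that
// dimension.
//
//   at::Tensor t = at::arange(6);            // shape [6]
//   at::Tensor u = at::unflatten(t, 0, {2, 3});  // shape [2, 3]
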
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Scalar, name, "aten::fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Scalar, schema_str, "fill.Scalar(Tensor self, Scalar value) -> Tensor")

// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fill_Scalar::schema> create_fill_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill_Scalar::name, fill_Scalar::overload_name)
      .typed<fill_Scalar::schema>();
}

// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor
at::Tensor fill_Scalar::call(const at::Tensor & self, const at::Scalar & value) {

    static auto op = create_fill_Scalar_typed_handle();
    return op.call(self, value);
}

// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor
at::Tensor fill_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value) {

    static auto op = create_fill_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Tensor, name, "aten::fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Tensor, schema_str, "fill.Tensor(Tensor self, Tensor value) -> Tensor")

// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fill_Tensor::schema> create_fill_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill_Tensor::name, fill_Tensor::overload_name)
      .typed<fill_Tensor::schema>();
}

// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor
at::Tensor fill_Tensor::call(const at::Tensor & self, const at::Tensor & value) {

    static auto op = create_fill_Tensor_typed_handle();
    return op.call(self, value);
}

// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor
at::Tensor fill_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value) {

    static auto op = create_fill_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill__Scalar, name, "aten::fill_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill__Scalar, schema_str, "fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)")

// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fill__Scalar::schema> create_fill__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill__Scalar::name, fill__Scalar::overload_name)
      .typed<fill__Scalar::schema>();
}

// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
at::Tensor & fill__Scalar::call(at::Tensor & self, const at::Scalar & value) {

    static auto op = create_fill__Scalar_typed_handle();
    return op.call(self, value);
}

// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
at::Tensor & fill__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & value) {

    static auto op = create_fill__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill__Tensor, name, "aten::fill_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill__Tensor, schema_str, "fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)")

// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fill__Tensor::schema> create_fill__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill__Tensor::name, fill__Tensor::overload_name)
      .typed<fill__Tensor::schema>();
}

// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
at::Tensor & fill__Tensor::call(at::Tensor & self, const at::Tensor & value) {

    static auto op = create_fill__Tensor_typed_handle();
    return op.call(self, value);
}

// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
at::Tensor & fill__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value) {

    static auto op = create_fill__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, value);
}

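// Usage sketch (illustrative, assuming <ATen/ATen.h>): fill is the
// functional form and returns a fresh tensor, while fill_ mutates self in
// place, matching the Tensor(a!) annotation in the schemas above.
//
//   at::Tensor t = at::zeros({2, 2});
//   at::Tensor f = at::fill(t, 7);           // t is untouched
//   t.fill_(7);                              // t now holds 7s
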
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm_out, name, "aten::lcm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm_out, schema_str, "lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lcm_out::schema> create_lcm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lcm_out::name, lcm_out::overload_name)
      .typed<lcm_out::schema>();
}

// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lcm_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_lcm_out_typed_handle();
    return op.call(self, other, out);
}

// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lcm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_lcm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm, name, "aten::lcm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm, schema_str, "lcm(Tensor self, Tensor other) -> Tensor")

// aten::lcm(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lcm::schema> create_lcm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lcm::name, lcm::overload_name)
      .typed<lcm::schema>();
}

// aten::lcm(Tensor self, Tensor other) -> Tensor
at::Tensor lcm::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_lcm_typed_handle();
    return op.call(self, other);
}

// aten::lcm(Tensor self, Tensor other) -> Tensor
at::Tensor lcm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_lcm_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm_, name, "aten::lcm_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lcm_, schema_str, "lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lcm_::schema> create_lcm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lcm_::name, lcm_::overload_name)
      .typed<lcm_::schema>();
}

// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & lcm_::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_lcm__typed_handle();
    return op.call(self, other);
}

// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & lcm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_lcm__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

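// Usage sketch (illustrative, assuming <ATen/ATen.h>): the three lcm entries
// above are the functional, out, and in-place variants of the same
// elementwise least-common-multiple op on integer tensors.
//
//   at::Tensor a = at::tensor({4, 6}, at::kLong);
//   at::Tensor b = at::tensor({6, 4}, at::kLong);
//   at::Tensor c = at::lcm(a, b);            // {12, 12}
//   a.lcm_(b);                               // in-place; a is now {12, 12}
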
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_backward, name, "aten::grid_sampler_2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_backward, schema_str, "grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)")

// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_2d_backward::schema> create_grid_sampler_2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_2d_backward::name, grid_sampler_2d_backward::overload_name)
      .typed<grid_sampler_2d_backward::schema>();
}

// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {

    static auto op = create_grid_sampler_2d_backward_typed_handle();
    return op.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}

// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {

    static auto op = create_grid_sampler_2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(group_norm, name, "aten::group_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(group_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(group_norm, schema_str, "group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor")

// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<group_norm::schema> create_group_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(group_norm::name, group_norm::overload_name)
      .typed<group_norm::schema>();
}

// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
at::Tensor group_norm::call(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {

    static auto op = create_group_norm_typed_handle();
    return op.call(input, num_groups, weight, bias, eps, cudnn_enabled);
}

// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
at::Tensor group_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {

    static auto op = create_group_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, num_groups, weight, bias, eps, cudnn_enabled);
}

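// Usage sketch (illustrative, assuming <ATen/ATen.h>): group_norm normalizes
// each of num_groups channel groups separately; weight and bias are optional
// affine parameters and default to None per the schema above.
//
//   at::Tensor x = at::randn({2, 4, 8});     // (N, C, L) with C == 4
//   at::Tensor y = at::group_norm(x, 2);     // 2 groups of 2 channels each
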
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_out, name, "aten::index_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_out, schema_str, "index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)")

// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_copy_out::schema> create_index_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy_out::name, index_copy_out::overload_name)
      .typed<index_copy_out::schema>();
}

// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_copy_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {

    static auto op = create_index_copy_out_typed_handle();
    return op.call(self, dim, index, source, out);
}

// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {

    static auto op = create_index_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_, name, "aten::index_copy_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_, schema_str, "index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)")

// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_copy_::schema> create_index_copy__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy_::name, index_copy_::overload_name)
      .typed<index_copy_::schema>();
}

// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
at::Tensor & index_copy_::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {

    static auto op = create_index_copy__typed_handle();
    return op.call(self, dim, index, source);
}

// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
at::Tensor & index_copy_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {

    static auto op = create_index_copy__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy, name, "aten::index_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy, schema_str, "index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor")

// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_copy::schema> create_index_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy::name, index_copy::overload_name)
      .typed<index_copy::schema>();
}

// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
at::Tensor index_copy::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {

    static auto op = create_index_copy_typed_handle();
    return op.call(self, dim, index, source);
}

// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
at::Tensor index_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {

    static auto op = create_index_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source);
}

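// Usage sketch (illustrative, assuming <ATen/ATen.h>): index_copy writes
// slices of source into self at the positions given by a Long index tensor
// along dim; index_copy_ does the same in place.
//
//   at::Tensor self = at::zeros({3, 2});
//   at::Tensor index = at::tensor({0, 2}, at::kLong);
//   at::Tensor src = at::ones({2, 2});
//   at::Tensor r = at::index_copy(self, 0, index, src);  // rows 0 and 2 set
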
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy__dimname, name, "aten::index_copy_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy__dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy__dimname, schema_str, "index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)")

// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_copy__dimname::schema> create_index_copy__dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy__dimname::name, index_copy__dimname::overload_name)
      .typed<index_copy__dimname::schema>();
}

// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
at::Tensor & index_copy__dimname::call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {

    static auto op = create_index_copy__dimname_typed_handle();
    return op.call(self, dim, index, source);
}

// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
at::Tensor & index_copy__dimname::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {

    static auto op = create_index_copy__dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_dimname, name, "aten::index_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_copy_dimname, schema_str, "index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor")

// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_copy_dimname::schema> create_index_copy_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy_dimname::name, index_copy_dimname::overload_name)
      .typed<index_copy_dimname::schema>();
}

// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
at::Tensor index_copy_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {

    static auto op = create_index_copy_dimname_typed_handle();
    return op.call(self, dim, index, source);
}

// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
at::Tensor index_copy_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {

    static auto op = create_index_copy_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl_, name, "aten::_index_put_impl_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl_, schema_str, "_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)")

// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_index_put_impl_::schema> create__index_put_impl__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_index_put_impl_::name, _index_put_impl_::overload_name)
      .typed<_index_put_impl_::schema>();
}

// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
at::Tensor & _index_put_impl_::call(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {

    static auto op = create__index_put_impl__typed_handle();
    return op.call(self, indices, values, accumulate, unsafe);
}

// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
at::Tensor & _index_put_impl_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {

    static auto op = create__index_put_impl__typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_distributed, name, "aten::is_distributed")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_distributed, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_distributed, schema_str, "is_distributed(Tensor self) -> bool")

// aten::is_distributed(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_distributed::schema> create_is_distributed_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_distributed::name, is_distributed::overload_name)
      .typed<is_distributed::schema>();
}

// aten::is_distributed(Tensor self) -> bool
bool is_distributed::call(const at::Tensor & self) {

    static auto op = create_is_distributed_typed_handle();
    return op.call(self);
}

// aten::is_distributed(Tensor self) -> bool
bool is_distributed::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_is_distributed_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_inference, name, "aten::is_inference")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_inference, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_inference, schema_str, "is_inference(Tensor self) -> bool")

// aten::is_inference(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_inference::schema> create_is_inference_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_inference::name, is_inference::overload_name)
      .typed<is_inference::schema>();
}

// aten::is_inference(Tensor self) -> bool
bool is_inference::call(const at::Tensor & self) {

    static auto op = create_is_inference_typed_handle();
    return op.call(self);
}

// aten::is_inference(Tensor self) -> bool
bool is_inference::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_is_inference_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

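// Usage sketch (illustrative, assuming <ATen/ATen.h> and
// <c10/core/InferenceMode.h>): tensors allocated while an InferenceMode
// guard is active are inference tensors, which is what is_inference reports.
//
//   at::Tensor a = at::ones({2});
//   bool x = at::is_inference(a);            // false
//   {
//     c10::InferenceMode guard;
//     at::Tensor b = at::ones({2});
//     bool y = at::is_inference(b);          // true
//   }
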
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kron, name, "aten::kron")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kron, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kron, schema_str, "kron(Tensor self, Tensor other) -> Tensor")

// aten::kron(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<kron::schema> create_kron_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kron::name, kron::overload_name)
      .typed<kron::schema>();
}

// aten::kron(Tensor self, Tensor other) -> Tensor
at::Tensor kron::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_kron_typed_handle();
    return op.call(self, other);
}

// aten::kron(Tensor self, Tensor other) -> Tensor
at::Tensor kron::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_kron_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kron_out, name, "aten::kron")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kron_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kron_out, schema_str, "kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<kron_out::schema> create_kron_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kron_out::name, kron_out::overload_name)
      .typed<kron_out::schema>();
}

// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kron_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_kron_out_typed_handle();
    return op.call(self, other, out);
}

// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kron_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_kron_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

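// Usage sketch (illustrative, assuming <ATen/ATen.h>): kron is the Kronecker
// product, so an (m, n) input and a (p, q) input give an (m*p, n*q) result;
// kron.out writes into a caller-provided tensor instead of allocating.
//
//   at::Tensor a = at::eye(2);
//   at::Tensor b = at::ones({2, 2});
//   at::Tensor k = at::kron(a, b);           // shape [4, 4], block diagonal
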
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear, name, "aten::linear")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear, schema_str, "linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor")

// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linear::schema> create_linear_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linear::name, linear::overload_name)
      .typed<linear::schema>();
}

// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor linear::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {

    static auto op = create_linear_typed_handle();
    return op.call(input, weight, bias);
}

// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor linear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {

    static auto op = create_linear_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_out, name, "aten::linear")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_out, schema_str, "linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linear_out::schema> create_linear_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linear_out::name, linear_out::overload_name)
      .typed<linear_out::schema>();
}

// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linear_out::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {

    static auto op = create_linear_out_typed_handle();
    return op.call(input, weight, bias, out);
}

// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linear_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {

    static auto op = create_linear_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear, name, "aten::mkldnn_linear")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear, schema_str, "mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor")

// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear::schema> create_mkldnn_linear_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear::name, mkldnn_linear::overload_name)
      .typed<mkldnn_linear::schema>();
}

// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor mkldnn_linear::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {

    static auto op = create_mkldnn_linear_typed_handle();
    return op.call(self, weight, bias);
}

// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor mkldnn_linear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {

    static auto op = create_mkldnn_linear_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_weights, name, "aten::mkldnn_linear_backward_weights")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_weights, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_weights, schema_str, "mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)")

// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_weights::schema> create_mkldnn_linear_backward_weights_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward_weights::name, mkldnn_linear_backward_weights::overload_name)
      .typed<mkldnn_linear_backward_weights::schema>();
}

// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {

    static auto op = create_mkldnn_linear_backward_weights_typed_handle();
    return op.call(grad_output, input, weight, bias_defined);
}

// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {

    static auto op = create_mkldnn_linear_backward_weights_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, weight, bias_defined);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_quantize_weight, name, "aten::fbgemm_linear_quantize_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_quantize_weight, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_quantize_weight, schema_str, "fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)")

// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_quantize_weight::schema> create_fbgemm_linear_quantize_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fbgemm_linear_quantize_weight::name, fbgemm_linear_quantize_weight::overload_name)
      .typed<fbgemm_linear_quantize_weight::schema>();
}

// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
::std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight::call(const at::Tensor & input) {

    static auto op = create_fbgemm_linear_quantize_weight_typed_handle();
    return op.call(input);
}

// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
::std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) {

    static auto op = create_fbgemm_linear_quantize_weight_typed_handle();
    return op.redispatch(dispatchKeySet, input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linspace, name, "aten::linspace")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linspace, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linspace, schema_str, "linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linspace::schema> create_linspace_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linspace::name, linspace::overload_name)
      .typed<linspace::schema>();
}

// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace::call(const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_linspace_typed_handle();
    return op.call(start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_linspace_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory);
}

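// Usage sketch (illustrative, assuming <ATen/ATen.h>): linspace returns
// `steps` values spaced evenly from start to end inclusive; dtype, layout,
// device, and pin_memory together form the TensorOptions of the result.
//
//   at::Tensor t = at::linspace(0.0, 1.0, 5);  // {0.0, 0.25, 0.5, 0.75, 1.0}
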
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linspace_out, name, "aten::linspace")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linspace_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linspace_out, schema_str, "linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linspace_out::schema> create_linspace_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linspace_out::name, linspace_out::overload_name)
      .typed<linspace_out::schema>();
}

// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_out::call(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {

    static auto op = create_linspace_out_typed_handle();
    return op.call(start, end, steps, out);
}

// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {

    static auto op = create_linspace_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10, name, "aten::log10")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10, schema_str, "log10(Tensor self) -> Tensor")

// aten::log10(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log10::schema> create_log10_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log10::name, log10::overload_name)
      .typed<log10::schema>();
}

// aten::log10(Tensor self) -> Tensor
at::Tensor log10::call(const at::Tensor & self) {

    static auto op = create_log10_typed_handle();
    return op.call(self);
}

// aten::log10(Tensor self) -> Tensor
at::Tensor log10::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_log10_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10_, name, "aten::log10_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10_, schema_str, "log10_(Tensor(a!) self) -> Tensor(a!)")

// aten::log10_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log10_::schema> create_log10__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log10_::name, log10_::overload_name)
      .typed<log10_::schema>();
}

// aten::log10_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log10_::call(at::Tensor & self) {

    static auto op = create_log10__typed_handle();
    return op.call(self);
}

// aten::log10_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log10_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_log10__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10_out, name, "aten::log10")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log10_out, schema_str, "log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log10_out::schema> create_log10_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log10_out::name, log10_out::overload_name)
      .typed<log10_out::schema>();
}

// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log10_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_log10_out_typed_handle();
    return op.call(self, out);
}

// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log10_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_log10_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p, name, "aten::log1p")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p, schema_str, "log1p(Tensor self) -> Tensor")

// aten::log1p(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log1p::schema> create_log1p_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log1p::name, log1p::overload_name)
      .typed<log1p::schema>();
}

// aten::log1p(Tensor self) -> Tensor
at::Tensor log1p::call(const at::Tensor & self) {

    static auto op = create_log1p_typed_handle();
    return op.call(self);
}

// aten::log1p(Tensor self) -> Tensor
at::Tensor log1p::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_log1p_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p_, name, "aten::log1p_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p_, schema_str, "log1p_(Tensor(a!) self) -> Tensor(a!)")

// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log1p_::schema> create_log1p__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log1p_::name, log1p_::overload_name)
      .typed<log1p_::schema>();
}

// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log1p_::call(at::Tensor & self) {

    static auto op = create_log1p__typed_handle();
    return op.call(self);
}

// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log1p_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_log1p__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p_out, name, "aten::log1p")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log1p_out, schema_str, "log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log1p_out::schema> create_log1p_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log1p_out::name, log1p_out::overload_name)
      .typed<log1p_out::schema>();
}

// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log1p_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_log1p_out_typed_handle();
    return op.call(self, out);
}

// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log1p_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_log1p_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

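// Usage sketch (illustrative, assuming <ATen/ATen.h>): log1p computes
// log(1 + x) accurately for |x| near zero, where forming 1.0 + x first would
// lose the low-order bits; it is the inverse of expm1 earlier in this file.
//
//   at::Tensor x = at::tensor({1e-10}, at::kDouble);
//   at::Tensor y = at::log1p(x);             // ~1e-10
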
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logaddexp2_out, name, "aten::logaddexp2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logaddexp2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logaddexp2_out, schema_str, "logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logaddexp2_out::schema> create_logaddexp2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logaddexp2_out::name, logaddexp2_out::overload_name)
      .typed<logaddexp2_out::schema>();
}

// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logaddexp2_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_logaddexp2_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logaddexp2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_logaddexp2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logaddexp2, name, "aten::logaddexp2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logaddexp2, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logaddexp2, schema_str, "logaddexp2(Tensor self, Tensor other) -> Tensor")

// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logaddexp2::schema> create_logaddexp2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logaddexp2::name, logaddexp2::overload_name)
      .typed<logaddexp2::schema>();
}

// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
at::Tensor logaddexp2::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_logaddexp2_typed_handle();
    return op.call(self, other);
}

// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
at::Tensor logaddexp2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_logaddexp2_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

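// Usage sketch (illustrative, assuming <ATen/ATen.h>): logaddexp2 computes
// log2(2^self + 2^other) without materializing the intermediate powers, so
// it stays finite even when 2^x would overflow; useful for summing
// probabilities kept in base-2 log space.
//
//   at::Tensor a = at::tensor({2000.0});
//   at::Tensor b = at::tensor({2000.0});
//   at::Tensor c = at::logaddexp2(a, b);     // {2001.0}; 2^2000 never formed
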
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax, name, "aten::_log_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax, schema_str, "_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor")

// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_log_softmax::schema> create__log_softmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_log_softmax::name, _log_softmax::overload_name)
      .typed<_log_softmax::schema>();
}

// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _log_softmax::call(const at::Tensor & self, int64_t dim, bool half_to_float) {

    static auto op = create__log_softmax_typed_handle();
    return op.call(self, dim, half_to_float);
}

// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _log_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {

    static auto op = create__log_softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, half_to_float);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_out, name, "aten::_log_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_out, schema_str, "_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_log_softmax_out::schema> create__log_softmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_log_softmax_out::name, _log_softmax_out::overload_name)
      .typed<_log_softmax_out::schema>();
}

// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _log_softmax_out::call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {

    static auto op = create__log_softmax_out_typed_handle();
    return op.call(self, dim, half_to_float, out);
}

// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _log_softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {

    static auto op = create__log_softmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, half_to_float, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp, name, "aten::logsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp, schema_str, "logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor")

// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logsumexp::schema> create_logsumexp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logsumexp::name, logsumexp::overload_name)
      .typed<logsumexp::schema>();
}

// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor logsumexp::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {

    static auto op = create_logsumexp_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor logsumexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {

    static auto op = create_logsumexp_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_out, name, "aten::logsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_out, schema_str, "logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logsumexp_out::schema> create_logsumexp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logsumexp_out::name, logsumexp_out::overload_name)
      .typed<logsumexp_out::schema>();
}

// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logsumexp_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {

    static auto op = create_logsumexp_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logsumexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {

    static auto op = create_logsumexp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_names, name, "aten::logsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_names, overload_name, "names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_names, schema_str, "logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor")

// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logsumexp_names::schema> create_logsumexp_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logsumexp_names::name, logsumexp_names::overload_name)
      .typed<logsumexp_names::schema>();
}

// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
at::Tensor logsumexp_names::call(const at::Tensor & self, at::DimnameList dim, bool keepdim) {

    static auto op = create_logsumexp_names_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
at::Tensor logsumexp_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim) {

    static auto op = create_logsumexp_names_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_names_out, name, "aten::logsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_names_out, overload_name, "names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logsumexp_names_out, schema_str, "logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logsumexp_names_out::schema> create_logsumexp_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logsumexp_names_out::name, logsumexp_names_out::overload_name)
      .typed<logsumexp_names_out::schema>();
}

// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logsumexp_names_out::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {

    static auto op = create_logsumexp_names_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logsumexp_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {

    static auto op = create_logsumexp_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

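// Usage sketch (illustrative, assuming <ATen/ATen.h>): logsumexp reduces over
// dim as log(sum(exp(x))), computed stably by factoring out the maximum
// before exponentiating, so large inputs do not overflow.
//
//   at::Tensor x = at::tensor({1000.0, 1000.0});
//   at::Tensor y = at::logsumexp(x, {0});    // ~1000.6931 (1000 + log 2)
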
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax, name, "aten::_aminmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax, schema_str, "_aminmax(Tensor self) -> (Tensor, Tensor)")

// aten::_aminmax(Tensor self) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_aminmax::schema> create__aminmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_aminmax::name, _aminmax::overload_name)
      .typed<_aminmax::schema>();
}

// aten::_aminmax(Tensor self) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _aminmax::call(const at::Tensor & self) {

    static auto op = create__aminmax_typed_handle();
    return op.call(self);
}

// aten::_aminmax(Tensor self) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _aminmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__aminmax_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_dim, name, "aten::_aminmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_dim, schema_str, "_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)")

// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_aminmax_dim::schema> create__aminmax_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_aminmax_dim::name, _aminmax_dim::overload_name)
      .typed<_aminmax_dim::schema>();
}

// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _aminmax_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create__aminmax_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _aminmax_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create__aminmax_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(aminmax, name, "aten::aminmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(aminmax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(aminmax, schema_str, "aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)")

// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
static C10_NOINLINE c10::TypedOperatorHandle<aminmax::schema> create_aminmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(aminmax::name, aminmax::overload_name)
      .typed<aminmax::schema>();
}

// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
::std::tuple<at::Tensor,at::Tensor> aminmax::call(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {

    static auto op = create_aminmax_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
::std::tuple<at::Tensor,at::Tensor> aminmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {

    static auto op = create_aminmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(aminmax_out, name, "aten::aminmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(aminmax_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(aminmax_out, schema_str, "aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)")

// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
static C10_NOINLINE c10::TypedOperatorHandle<aminmax_out::schema> create_aminmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(aminmax_out::name, aminmax_out::overload_name)
      .typed<aminmax_out::schema>();
}

// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
::std::tuple<at::Tensor &,at::Tensor &> aminmax_out::call(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {

    static auto op = create_aminmax_out_typed_handle();
    return op.call(self, dim, keepdim, min, max);
}

// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
::std::tuple<at::Tensor &,at::Tensor &> aminmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {

    static auto op = create_aminmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, min, max);
}

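// A minimal caller-side sketch of the aminmax wrappers above (the
// underscore-prefixed _aminmax overloads are the internal counterparts).
// aminmax computes both extrema in a single pass, which is why it returns a
// (min, max) tuple rather than requiring two separate reductions:
//
//   at::Tensor t = at::randn({2, 3});
//   auto [mn, mx] = at::aminmax(t);                                  // full reduction
//   auto [rmin, rmax] = at::aminmax(t, /*dim=*/1, /*keepdim=*/false); // per-row
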
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_dim, name, "aten::max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_dim, schema_str, "max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<max_dim::schema> create_max_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_dim::name, max_dim::overload_name)
      .typed<max_dim::schema>();
}

// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> max_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_max_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> max_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_max_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_dim_max, name, "aten::max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_dim_max, overload_name, "dim_max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_dim_max, schema_str, "max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<max_dim_max::schema> create_max_dim_max_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_dim_max::name, max_dim_max::overload_name)
      .typed<max_dim_max::schema>();
}

// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> max_dim_max::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {

    static auto op = create_max_dim_max_typed_handle();
    return op.call(self, dim, keepdim, max, max_values);
}

// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> max_dim_max::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {

    static auto op = create_max_dim_max_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_names_dim, name, "aten::max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_names_dim, overload_name, "names_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_names_dim, schema_str, "max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<max_names_dim::schema> create_max_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_names_dim::name, max_names_dim::overload_name)
      .typed<max_names_dim::schema>();
}

// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> max_names_dim::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_max_names_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> max_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_max_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_names_dim_max, name, "aten::max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_names_dim_max, overload_name, "names_dim_max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_names_dim_max, schema_str, "max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<max_names_dim_max::schema> create_max_names_dim_max_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_names_dim_max::name, max_names_dim_max::overload_name)
      .typed<max_names_dim_max::schema>();
}

// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> max_names_dim_max::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {

    static auto op = create_max_names_dim_max_typed_handle();
    return op.call(self, dim, keepdim, max, max_values);
}

// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> max_names_dim_max::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {

    static auto op = create_max_names_dim_max_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
}

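// A minimal caller-side sketch of the max.dim family above. The names_dim
// overloads take an at::Dimname instead of an integer axis, and the *_max
// out-variants write into caller-provided (max, max_values) tensors:
//
//   at::Tensor t = at::randn({3, 4});
//   auto [values, indices] = at::max(t, /*dim=*/1);                  // both shaped [3]
//   auto [kv, ki] = at::max(t, /*dim=*/1, /*keepdim=*/true);         // both shaped [3, 1]
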
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool1d_with_indices, name, "aten::max_pool1d_with_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool1d_with_indices, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool1d_with_indices, schema_str, "max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)")

// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<max_pool1d_with_indices::schema> create_max_pool1d_with_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool1d_with_indices::name, max_pool1d_with_indices::overload_name)
      .typed<max_pool1d_with_indices::schema>();
}

// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool1d_with_indices_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool1d_with_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

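// Sketch: max_pool1d_with_indices returns the pooled values together with the
// indices of the selected elements (as used by the backward pass). Assuming an
// input of shape [N, C, L]:
//
//   at::Tensor x = at::randn({1, 2, 8});
//   auto [pooled, idx] = at::max_pool1d_with_indices(
//       x, /*kernel_size=*/{2}, /*stride=*/{2});                     // pooled: [1, 2, 4]
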
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool3d_backward, name, "aten::mkldnn_max_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool3d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool3d_backward, schema_str, "mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool3d_backward::schema> create_mkldnn_max_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool3d_backward::name, mkldnn_max_pool3d_backward::overload_name)
      .typed<mkldnn_max_pool3d_backward::schema>();
}

// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_mkldnn_max_pool3d_backward_typed_handle();
    return op.call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_mkldnn_max_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool1d, name, "aten::quantized_max_pool1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool1d, schema_str, "quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool1d::schema> create_quantized_max_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool1d::name, quantized_max_pool1d::overload_name)
      .typed<quantized_max_pool1d::schema>();
}

// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool1d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_quantized_max_pool1d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_quantized_max_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_convolution, name, "aten::mkldnn_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_convolution, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_convolution, schema_str, "mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor")

// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_convolution::schema> create_mkldnn_convolution_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_convolution::name, mkldnn_convolution::overload_name)
      .typed<mkldnn_convolution::schema>();
}

// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor
at::Tensor mkldnn_convolution::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_mkldnn_convolution_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups);
}

// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor
at::Tensor mkldnn_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_mkldnn_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_backward, name, "aten::miopen_batch_norm_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_backward, schema_str, "miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)")

// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<miopen_batch_norm_backward::schema> create_miopen_batch_norm_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_batch_norm_backward::name, miopen_batch_norm_backward::overload_name)
      .typed<miopen_batch_norm_backward::schema>();
}

// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward::call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {

    static auto op = create_miopen_batch_norm_backward_typed_handle();
    return op.call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
}

// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {

    static auto op = create_miopen_batch_norm_backward_typed_handle();
    return op.redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_relu, name, "aten::miopen_convolution_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_relu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_convolution_relu, schema_str, "miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor")

// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_relu::schema> create_miopen_convolution_relu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_convolution_relu::name, miopen_convolution_relu::overload_name)
      .typed<miopen_convolution_relu::schema>();
}

// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
at::Tensor miopen_convolution_relu::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_miopen_convolution_relu_typed_handle();
    return op.call(self, weight, bias, stride, padding, dilation, groups);
}

// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
at::Tensor miopen_convolution_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_miopen_convolution_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode, name, "aten::mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode, schema_str, "mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<mode::schema> create_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mode::name, mode::overload_name)
      .typed<mode::schema>();
}

// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> mode::call(const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_mode_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_values, name, "aten::mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_values, overload_name, "values")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_values, schema_str, "mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<mode_values::schema> create_mode_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mode_values::name, mode_values::overload_name)
      .typed<mode_values::schema>();
}

// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> mode_values::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_mode_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> mode_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_mode_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_dimname, name, "aten::mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_dimname, schema_str, "mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<mode_dimname::schema> create_mode_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mode_dimname::name, mode_dimname::overload_name)
      .typed<mode_dimname::schema>();
}

// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> mode_dimname::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_mode_dimname_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> mode_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_mode_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_dimname_out, name, "aten::mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mode_dimname_out, schema_str, "mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<mode_dimname_out::schema> create_mode_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mode_dimname_out::name, mode_dimname_out::overload_name)
      .typed<mode_dimname_out::schema>();
}

// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> mode_dimname_out::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_mode_dimname_out_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> mode_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_mode_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

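// A minimal caller-side sketch of the mode wrappers above. values holds the
// most frequent element along the dimension and indices the position of (one
// of) its occurrences; the dimname/out overloads mirror the same contract:
//
//   at::Tensor t = at::randint(0, 3, {6});
//   auto [values, indices] = at::mode(t);                            // dim defaults to -1
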
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Tensor, name, "aten::mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Tensor, schema_str, "mul.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mul_Tensor::schema> create_mul_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul_Tensor::name, mul_Tensor::overload_name)
      .typed<mul_Tensor::schema>();
}

// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor mul_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_mul_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor mul_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_mul_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul__Tensor, name, "aten::mul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul__Tensor, schema_str, "mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mul__Tensor::schema> create_mul__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul__Tensor::name, mul__Tensor::overload_name)
      .typed<mul__Tensor::schema>();
}

// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & mul__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_mul__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & mul__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_mul__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_out, name, "aten::mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_out, schema_str, "mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mul_out::schema> create_mul_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul_out::name, mul_out::overload_name)
      .typed<mul_out::schema>();
}

// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mul_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_mul_out_typed_handle();
    return op.call(self, other, out);
}

// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_mul_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Scalar, name, "aten::mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Scalar, schema_str, "mul.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mul_Scalar::schema> create_mul_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul_Scalar::name, mul_Scalar::overload_name)
      .typed<mul_Scalar::schema>();
}

// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor mul_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_mul_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor mul_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_mul_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul__Scalar, name, "aten::mul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul__Scalar, schema_str, "mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mul__Scalar::schema> create_mul__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul__Scalar::name, mul__Scalar::overload_name)
      .typed<mul__Scalar::schema>();
}

// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & mul__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_mul__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & mul__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_mul__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

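// A minimal caller-side sketch of the mul family above. The (a!) annotations
// in the schemas mark which argument each overload mutates: self for mul_,
// out for mul.out:
//
//   at::Tensor a = at::randn({2, 2});
//   at::Tensor b = at::randn({2, 2});
//   at::Tensor c = at::mul(a, b);              // functional, mul.Tensor
//   a.mul_(2.0);                               // in-place, mul_.Scalar
//   at::Tensor out = at::empty_like(c);
//   at::mul_out(out, a, b);                    // out-variant, mul.out
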
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma_out, name, "aten::mvlgamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma_out, schema_str, "mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mvlgamma_out::schema> create_mvlgamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mvlgamma_out::name, mvlgamma_out::overload_name)
      .typed<mvlgamma_out::schema>();
}

// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mvlgamma_out::call(const at::Tensor & self, int64_t p, at::Tensor & out) {

    static auto op = create_mvlgamma_out_typed_handle();
    return op.call(self, p, out);
}

// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mvlgamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) {

    static auto op = create_mvlgamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma, name, "aten::mvlgamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma, schema_str, "mvlgamma(Tensor self, int p) -> Tensor")

// aten::mvlgamma(Tensor self, int p) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mvlgamma::schema> create_mvlgamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mvlgamma::name, mvlgamma::overload_name)
      .typed<mvlgamma::schema>();
}

// aten::mvlgamma(Tensor self, int p) -> Tensor
at::Tensor mvlgamma::call(const at::Tensor & self, int64_t p) {

    static auto op = create_mvlgamma_typed_handle();
    return op.call(self, p);
}

// aten::mvlgamma(Tensor self, int p) -> Tensor
at::Tensor mvlgamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p) {

    static auto op = create_mvlgamma_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma_, name, "aten::mvlgamma_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mvlgamma_, schema_str, "mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)")

// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mvlgamma_::schema> create_mvlgamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mvlgamma_::name, mvlgamma_::overload_name)
      .typed<mvlgamma_::schema>();
}

// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
at::Tensor & mvlgamma_::call(at::Tensor & self, int64_t p) {

    static auto op = create_mvlgamma__typed_handle();
    return op.call(self, p);
}

// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
at::Tensor & mvlgamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t p) {

    static auto op = create_mvlgamma__typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

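// Sketch: mvlgamma computes the multivariate log-gamma function of order p,
// elementwise; every input element must exceed (p - 1) / 2 for the result to
// be defined. A minimal example, assuming values in the valid domain:
//
//   at::Tensor x = at::full({3}, 2.0);
//   at::Tensor y = at::mvlgamma(x, /*p=*/2);
//   x.mvlgamma_(/*p=*/2);                      // in-place variant
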
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow, name, "aten::narrow")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow, schema_str, "narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)")

// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<narrow::schema> create_narrow_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(narrow::name, narrow::overload_name)
      .typed<narrow::schema>();
}

// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
at::Tensor narrow::call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {

    static auto op = create_narrow_typed_handle();
    return op.call(self, dim, start, length);
}

// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
at::Tensor narrow::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {

    static auto op = create_narrow_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, length);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_Tensor, name, "aten::narrow")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_Tensor, schema_str, "narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)")

// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<narrow_Tensor::schema> create_narrow_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(narrow_Tensor::name, narrow_Tensor::overload_name)
      .typed<narrow_Tensor::schema>();
}

// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
at::Tensor narrow_Tensor::call(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {

    static auto op = create_narrow_Tensor_typed_handle();
    return op.call(self, dim, start, length);
}

// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
at::Tensor narrow_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {

    static auto op = create_narrow_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, length);
}

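// Sketch: narrow returns a view (note the Tensor(a) alias annotation in the
// schema), so no data is copied and writes through the result are visible in
// self. The narrow.Tensor overload accepts start as a 0-dim tensor, useful
// when the offset is itself computed as a tensor:
//
//   at::Tensor t = at::arange(10);
//   at::Tensor s = at::narrow(t, /*dim=*/0, /*start=*/2, /*length=*/4); // elements 2..5
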
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_elemt, name, "aten::batch_norm_backward_elemt")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_elemt, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_elemt, schema_str, "batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor")

// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward_elemt::schema> create_batch_norm_backward_elemt_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_backward_elemt::name, batch_norm_backward_elemt::overload_name)
      .typed<batch_norm_backward_elemt::schema>();
}

// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
at::Tensor batch_norm_backward_elemt::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {

    static auto op = create_batch_norm_backward_elemt_typed_handle();
    return op.call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count);
}

// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
at::Tensor batch_norm_backward_elemt::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {

    static auto op = create_batch_norm_backward_elemt_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pdist, name, "aten::pdist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pdist, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pdist, schema_str, "pdist(Tensor self, float p=2) -> Tensor")

// aten::pdist(Tensor self, float p=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pdist::schema> create_pdist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pdist::name, pdist::overload_name)
      .typed<pdist::schema>();
}

// aten::pdist(Tensor self, float p=2) -> Tensor
at::Tensor pdist::call(const at::Tensor & self, double p) {

    static auto op = create_pdist_typed_handle();
    return op.call(self, p);
}

// aten::pdist(Tensor self, float p=2) -> Tensor
at::Tensor pdist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p) {

    static auto op = create_pdist_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

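// Sketch: pdist expects a 2-D input of shape [N, M] and returns the condensed
// pairwise-distance vector of length N * (N - 1) / 2 under the p-norm:
//
//   at::Tensor x = at::randn({5, 3});
//   at::Tensor d = at::pdist(x);               // p defaults to 2; d has 10 elements
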
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(moveaxis_intlist, name, "aten::moveaxis")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(moveaxis_intlist, overload_name, "intlist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(moveaxis_intlist, schema_str, "moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)")

// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<moveaxis_intlist::schema> create_moveaxis_intlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(moveaxis_intlist::name, moveaxis_intlist::overload_name)
      .typed<moveaxis_intlist::schema>();
}

// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
at::Tensor moveaxis_intlist::call(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {

    static auto op = create_moveaxis_intlist_typed_handle();
    return op.call(self, source, destination);
}

// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
at::Tensor moveaxis_intlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {

    static auto op = create_moveaxis_intlist_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, destination);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(moveaxis_int, name, "aten::moveaxis")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(moveaxis_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(moveaxis_int, schema_str, "moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)")

// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<moveaxis_int::schema> create_moveaxis_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(moveaxis_int::name, moveaxis_int::overload_name)
      .typed<moveaxis_int::schema>();
}

// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
at::Tensor moveaxis_int::call(const at::Tensor & self, int64_t source, int64_t destination) {

    static auto op = create_moveaxis_int_typed_handle();
    return op.call(self, source, destination);
}

// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
at::Tensor moveaxis_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination) {

    static auto op = create_moveaxis_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, destination);
}

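// Sketch: both moveaxis overloads above return views that reorder dimensions;
// the intlist form moves several axes at once:
//
//   at::Tensor t = at::randn({2, 3, 4});
//   at::Tensor a = at::moveaxis(t, /*source=*/0, /*destination=*/-1);          // [3, 4, 2]
//   at::Tensor b = at::moveaxis(t, /*source=*/{0, 1}, /*destination=*/{-1, -2}); // [4, 3, 2]
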
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_unshuffle, name, "aten::pixel_unshuffle")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_unshuffle, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_unshuffle, schema_str, "pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor")

// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pixel_unshuffle::schema> create_pixel_unshuffle_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pixel_unshuffle::name, pixel_unshuffle::overload_name)
      .typed<pixel_unshuffle::schema>();
}

// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
at::Tensor pixel_unshuffle::call(const at::Tensor & self, int64_t downscale_factor) {

    static auto op = create_pixel_unshuffle_typed_handle();
    return op.call(self, downscale_factor);
}

// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
at::Tensor pixel_unshuffle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor) {

    static auto op = create_pixel_unshuffle_typed_handle();
    return op.redispatch(dispatchKeySet, self, downscale_factor);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_pinned, name, "aten::is_pinned")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_pinned, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_pinned, schema_str, "is_pinned(Tensor self, Device? device=None) -> bool")

// aten::is_pinned(Tensor self, Device? device=None) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_pinned::schema> create_is_pinned_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_pinned::name, is_pinned::overload_name)
      .typed<is_pinned::schema>();
}

// aten::is_pinned(Tensor self, Device? device=None) -> bool
bool is_pinned::call(const at::Tensor & self, c10::optional<at::Device> device) {

    static auto op = create_is_pinned_typed_handle();
    return op.call(self, device);
}

// aten::is_pinned(Tensor self, Device? device=None) -> bool
bool is_pinned::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device) {

    static auto op = create_is_pinned_typed_handle();
    return op.redispatch(dispatchKeySet, self, device);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pin_memory, name, "aten::pin_memory")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pin_memory, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pin_memory, schema_str, "pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)")

// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<pin_memory::schema> create_pin_memory_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pin_memory::name, pin_memory::overload_name)
      .typed<pin_memory::schema>();
}

// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
at::Tensor pin_memory::call(const at::Tensor & self, c10::optional<at::Device> device) {

    static auto op = create_pin_memory_typed_handle();
    return op.call(self, device);
}

// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
at::Tensor pin_memory::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device) {

    static auto op = create_pin_memory_typed_handle();
    return op.redispatch(dispatchKeySet, self, device);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pin_memory, name, "aten::_pin_memory")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pin_memory, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pin_memory, schema_str, "_pin_memory(Tensor self, Device? device=None) -> Tensor")

// aten::_pin_memory(Tensor self, Device? device=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pin_memory::schema> create__pin_memory_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pin_memory::name, _pin_memory::overload_name)
      .typed<_pin_memory::schema>();
}

// aten::_pin_memory(Tensor self, Device? device=None) -> Tensor
at::Tensor _pin_memory::call(const at::Tensor & self, c10::optional<at::Device> device) {

    static auto op = create__pin_memory_typed_handle();
    return op.call(self, device);
}

// aten::_pin_memory(Tensor self, Device? device=None) -> Tensor
at::Tensor _pin_memory::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device) {

    static auto op = create__pin_memory_typed_handle();
    return op.redispatch(dispatchKeySet, self, device);
}

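// Sketch of the pinned-memory entry points above, assuming a CUDA-enabled
// build (pinning page-locks host memory for faster async host-to-device
// copies). pin_memory is the public wrapper; _pin_memory is the internal op
// it forwards to once it has checked whether the tensor is already pinned:
//
//   at::Tensor t = at::randn({1024});
//   if (!t.is_pinned()) {
//     t = t.pin_memory();
//   }
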
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn, name, "aten::randn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn, schema_str, "randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randn::schema> create_randn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn::name, randn::overload_name)
      .typed<randn::schema>();
}

// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randn::call(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randn_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randn::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randn_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

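// A minimal caller-side sketch of the randn factory above; size is SymInt[]
// in the schema, but plain integer sizes convert implicitly. The generator
// and names overloads that follow add an explicit at::Generator and named
// dimensions (Dimname[]?) to the same factory:
//
//   at::Tensor a = at::randn({2, 3});                          // default dtype/device
//   at::Tensor b = at::randn({2, 3}, at::dtype(at::kDouble));  // explicit options
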
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator, name, "aten::randn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator, overload_name, "generator")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator, schema_str, "randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randn_generator::schema> create_randn_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_generator::name, randn_generator::overload_name)
      .typed<randn_generator::schema>();
}

// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randn_generator::call(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randn_generator_typed_handle();
    return op.call(size, generator, dtype, layout, device, pin_memory);
}

// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randn_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randn_generator_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_names, name, "aten::randn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_names, overload_name, "names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_names, schema_str, "randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randn_names::schema> create_randn_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_names::name, randn_names::overload_name)
      .typed<randn_names::schema>();
}

// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randn_names::call(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randn_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory);
}

// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randn_names::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_randn_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_with_names, name, "aten::randn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_with_names, overload_name, "generator_with_names")
5052STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_with_names, schema_str, "randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
5053
5054// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5055static C10_NOINLINE c10::TypedOperatorHandle<randn_generator_with_names::schema> create_randn_generator_with_names_typed_handle() {
5056 return c10::Dispatcher::singleton()
5057 .findSchemaOrThrow(randn_generator_with_names::name, randn_generator_with_names::overload_name)
5058 .typed<randn_generator_with_names::schema>();
5059}
5060
5061// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5062at::Tensor randn_generator_with_names::call(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5063
5064 static auto op = create_randn_generator_with_names_typed_handle();
5065 return op.call(size, generator, names, dtype, layout, device, pin_memory);
5066}
5067
5068// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5069at::Tensor randn_generator_with_names::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5070
5071 static auto op = create_randn_generator_with_names_typed_handle();
5072 return op.redispatch(dispatchKeySet, size, generator, names, dtype, layout, device, pin_memory);
5073}
5074
5075STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_out, name, "aten::randn")
5076STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_out, overload_name, "out")
5077STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_out, schema_str, "randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")
5078
5079// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5080static C10_NOINLINE c10::TypedOperatorHandle<randn_out::schema> create_randn_out_typed_handle() {
5081 return c10::Dispatcher::singleton()
5082 .findSchemaOrThrow(randn_out::name, randn_out::overload_name)
5083 .typed<randn_out::schema>();
5084}
5085
5086// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5087at::Tensor & randn_out::call(c10::SymIntArrayRef size, at::Tensor & out) {
5088
5089 static auto op = create_randn_out_typed_handle();
5090 return op.call(size, out);
5091}
5092
5093// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5094at::Tensor & randn_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
5095
5096 static auto op = create_randn_out_typed_handle();
5097 return op.redispatch(dispatchKeySet, size, out);
5098}
5099
5100STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_out, name, "aten::randn")
5101STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_out, overload_name, "generator_out")
5102STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_out, schema_str, "randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)")
5103
5104// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5105static C10_NOINLINE c10::TypedOperatorHandle<randn_generator_out::schema> create_randn_generator_out_typed_handle() {
5106 return c10::Dispatcher::singleton()
5107 .findSchemaOrThrow(randn_generator_out::name, randn_generator_out::overload_name)
5108 .typed<randn_generator_out::schema>();
5109}
5110
5111// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5112at::Tensor & randn_generator_out::call(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5113
5114 static auto op = create_randn_generator_out_typed_handle();
5115 return op.call(size, generator, out);
5116}
5117
5118// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5119at::Tensor & randn_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5120
5121 static auto op = create_randn_generator_out_typed_handle();
5122 return op.redispatch(dispatchKeySet, size, generator, out);
5123}
5124
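// Hand-written sketch: the six "aten::randn" entries above share one operator
// name and are disambiguated purely by overload_name ("", "generator", "names",
// "generator_with_names", "out", "generator_out") at schema-lookup time.
// `example_randn_with_gen` is a hypothetical helper.
[[maybe_unused]] static at::Tensor example_randn_with_gen(c10::SymIntArrayRef size, at::Generator gen) {
  // Resolves the "generator" overload; unset TensorOptions fields fall back to
  // the dispatcher-side defaults.
  return randn_generator::call(size, gen, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
}
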
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_step, name, "aten::range")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_step, overload_name, "step")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_step, schema_str, "range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<range_step::schema> create_range_step_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(range_step::name, range_step::overload_name)
      .typed<range_step::schema>();
}

// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor range_step::call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_range_step_typed_handle();
    return op.call(start, end, step, dtype, layout, device, pin_memory);
}

// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor range_step::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_range_step_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, step, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range, name, "aten::range")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range, schema_str, "range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<range::schema> create_range_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(range::name, range::overload_name)
      .typed<range::schema>();
}

// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor range::call(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_range_typed_handle();
    return op.call(start, end, dtype, layout, device, pin_memory);
}

// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor range::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_range_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_out_, name, "aten::range")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_out_, overload_name, "out_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_out_, schema_str, "range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)")

// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<range_out_::schema> create_range_out__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(range_out_::name, range_out_::overload_name)
      .typed<range_out_::schema>();
}

// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & range_out_::call(const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {

    static auto op = create_range_out__typed_handle();
    return op.call(start, end, out);
}

// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & range_out_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {

    static auto op = create_range_out__typed_handle();
    return op.redispatch(dispatchKeySet, start, end, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_out, name, "aten::range")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(range_out, schema_str, "range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<range_out::schema> create_range_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(range_out::name, range_out::overload_name)
      .typed<range_out::schema>();
}

// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & range_out::call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {

    static auto op = create_range_out_typed_handle();
    return op.call(start, end, step, out);
}

// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & range_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {

    static auto op = create_range_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, step, out);
}

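// Hand-written sketch: unlike aten::arange, aten::range treats `end` as
// inclusive, so range(0, 4, 1) materializes {0, 1, 2, 3, 4}. The overloads
// above differ only in whether `step` is explicit and whether an out tensor is
// supplied. `example_range` is a hypothetical helper.
[[maybe_unused]] static at::Tensor example_range() {
  // Integer literals convert implicitly to at::Scalar; options are defaulted.
  return range_step::call(0, 4, 1, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
}
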
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ravel, name, "aten::ravel")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ravel, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ravel, schema_str, "ravel(Tensor(a) self) -> Tensor(a)")

// aten::ravel(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<ravel::schema> create_ravel_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ravel::name, ravel::overload_name)
      .typed<ravel::schema>();
}

// aten::ravel(Tensor(a) self) -> Tensor(a)
at::Tensor ravel::call(const at::Tensor & self) {

    static auto op = create_ravel_typed_handle();
    return op.call(self);
}

// aten::ravel(Tensor(a) self) -> Tensor(a)
at::Tensor ravel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_ravel_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal, name, "aten::reciprocal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal, schema_str, "reciprocal(Tensor self) -> Tensor")

// aten::reciprocal(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<reciprocal::schema> create_reciprocal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reciprocal::name, reciprocal::overload_name)
      .typed<reciprocal::schema>();
}

// aten::reciprocal(Tensor self) -> Tensor
at::Tensor reciprocal::call(const at::Tensor & self) {

    static auto op = create_reciprocal_typed_handle();
    return op.call(self);
}

// aten::reciprocal(Tensor self) -> Tensor
at::Tensor reciprocal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_reciprocal_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal_, name, "aten::reciprocal_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal_, schema_str, "reciprocal_(Tensor(a!) self) -> Tensor(a!)")

// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<reciprocal_::schema> create_reciprocal__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reciprocal_::name, reciprocal_::overload_name)
      .typed<reciprocal_::schema>();
}

// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & reciprocal_::call(at::Tensor & self) {

    static auto op = create_reciprocal__typed_handle();
    return op.call(self);
}

// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & reciprocal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_reciprocal__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal_out, name, "aten::reciprocal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reciprocal_out, schema_str, "reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<reciprocal_out::schema> create_reciprocal_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reciprocal_out::name, reciprocal_out::overload_name)
      .typed<reciprocal_out::schema>();
}

// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & reciprocal_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_reciprocal_out_typed_handle();
    return op.call(self, out);
}

// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & reciprocal_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_reciprocal_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

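// Hand-written sketch: reciprocal above is the standard torchgen triplet --
// functional (allocates a result), in-place (schema Tensor(a!) self: mutates
// and returns self), and out (writes into a caller-provided buffer). The same
// convention applies to neg/neg_/neg.out below. `example_reciprocal_variants`
// is a hypothetical helper.
[[maybe_unused]] static void example_reciprocal_variants(at::Tensor & t, at::Tensor & buf) {
  [[maybe_unused]] at::Tensor fresh = reciprocal::call(t);  // new tensor, t untouched
  reciprocal_out::call(t, buf);                             // result written into buf
  reciprocal_::call(t);                                     // t overwritten in place
}
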
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg, name, "aten::neg")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg, schema_str, "neg(Tensor self) -> Tensor")

// aten::neg(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<neg::schema> create_neg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(neg::name, neg::overload_name)
      .typed<neg::schema>();
}

// aten::neg(Tensor self) -> Tensor
at::Tensor neg::call(const at::Tensor & self) {

    static auto op = create_neg_typed_handle();
    return op.call(self);
}

// aten::neg(Tensor self) -> Tensor
at::Tensor neg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_neg_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg_, name, "aten::neg_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg_, schema_str, "neg_(Tensor(a!) self) -> Tensor(a!)")

// aten::neg_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<neg_::schema> create_neg__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(neg_::name, neg_::overload_name)
      .typed<neg_::schema>();
}

// aten::neg_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & neg_::call(at::Tensor & self) {

    static auto op = create_neg__typed_handle();
    return op.call(self);
}

// aten::neg_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & neg_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_neg__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg_out, name, "aten::neg")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(neg_out, schema_str, "neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<neg_out::schema> create_neg_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(neg_out::name, neg_out::overload_name)
      .typed<neg_out::schema>();
}

// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & neg_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_neg_out_typed_handle();
    return op.call(self, out);
}

// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & neg_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_neg_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reshape_as, name, "aten::reshape_as")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reshape_as, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reshape_as, schema_str, "reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)")

// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<reshape_as::schema> create_reshape_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reshape_as::name, reshape_as::overload_name)
      .typed<reshape_as::schema>();
}

// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor reshape_as::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_reshape_as_typed_handle();
    return op.call(self, other);
}

// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor reshape_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_reshape_as_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

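// Hand-written note: the Tensor(a) annotation in reshape_as records that the
// result may alias `self` -- reshape returns a view when the target shape is
// compatible with the input's strides and copies otherwise, so callers should
// not rely on either behavior. `example_reshape_as` is a hypothetical helper.
[[maybe_unused]] static at::Tensor example_reshape_as(const at::Tensor & self, const at::Tensor & other) {
  // May share storage with `self`; clone the result first if a copy is needed.
  return reshape_as::call(self, other);
}
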
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu, name, "aten::rrelu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu, schema_str, "rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor")

// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rrelu::schema> create_rrelu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu::name, rrelu::overload_name)
      .typed<rrelu::schema>();
}

// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
at::Tensor rrelu::call(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {

    static auto op = create_rrelu_typed_handle();
    return op.call(self, lower, upper, training, generator);
}

// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
at::Tensor rrelu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {

    static auto op = create_rrelu_typed_handle();
    return op.redispatch(dispatchKeySet, self, lower, upper, training, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_, name, "aten::rrelu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_, schema_str, "rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)")

// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_::schema> create_rrelu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_::name, rrelu_::overload_name)
      .typed<rrelu_::schema>();
}

// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
at::Tensor & rrelu_::call(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {

    static auto op = create_rrelu__typed_handle();
    return op.call(self, lower, upper, training, generator);
}

// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
at::Tensor & rrelu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {

    static auto op = create_rrelu__typed_handle();
    return op.redispatch(dispatchKeySet, self, lower, upper, training, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu6, name, "aten::relu6")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu6, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu6, schema_str, "relu6(Tensor self) -> Tensor")

// aten::relu6(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<relu6::schema> create_relu6_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu6::name, relu6::overload_name)
      .typed<relu6::schema>();
}

// aten::relu6(Tensor self) -> Tensor
at::Tensor relu6::call(const at::Tensor & self) {

    static auto op = create_relu6_typed_handle();
    return op.call(self);
}

// aten::relu6(Tensor self) -> Tensor
at::Tensor relu6::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_relu6_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu6_, name, "aten::relu6_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu6_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(relu6_, schema_str, "relu6_(Tensor(a!) self) -> Tensor(a!)")

// aten::relu6_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<relu6_::schema> create_relu6__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu6_::name, relu6_::overload_name)
      .typed<relu6_::schema>();
}

// aten::relu6_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & relu6_::call(at::Tensor & self) {

    static auto op = create_relu6__typed_handle();
    return op.call(self);
}

// aten::relu6_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & relu6_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_relu6__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prelu, name, "aten::prelu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prelu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(prelu, schema_str, "prelu(Tensor self, Tensor weight) -> Tensor")

// aten::prelu(Tensor self, Tensor weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<prelu::schema> create_prelu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prelu::name, prelu::overload_name)
      .typed<prelu::schema>();
}

// aten::prelu(Tensor self, Tensor weight) -> Tensor
at::Tensor prelu::call(const at::Tensor & self, const at::Tensor & weight) {

    static auto op = create_prelu_typed_handle();
    return op.call(self, weight);
}

// aten::prelu(Tensor self, Tensor weight) -> Tensor
at::Tensor prelu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight) {

    static auto op = create_prelu_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_prelu_kernel_backward, name, "aten::_prelu_kernel_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_prelu_kernel_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_prelu_kernel_backward, schema_str, "_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)")

// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_prelu_kernel_backward::schema> create__prelu_kernel_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_prelu_kernel_backward::name, _prelu_kernel_backward::overload_name)
      .typed<_prelu_kernel_backward::schema>();
}

// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {

    static auto op = create__prelu_kernel_backward_typed_handle();
    return op.call(grad_output, self, weight);
}

// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {

    static auto op = create__prelu_kernel_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, weight);
}

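// Hand-written sketch: multi-output schemas such as "-> (Tensor, Tensor)" in
// _prelu_kernel_backward surface as ::std::tuple on the C++ side; structured
// bindings are the idiomatic way to unpack them. `example_prelu_grads` is a
// hypothetical helper.
[[maybe_unused]] static at::Tensor example_prelu_grads(const at::Tensor & grad_out, const at::Tensor & self, const at::Tensor & weight) {
  auto [grad_self, grad_weight] = _prelu_kernel_backward::call(grad_out, self, weight);
  (void)grad_weight;  // second output: gradient w.r.t. the learned slope
  return grad_self;
}
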
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_backward_grad_input, name, "aten::gelu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_backward_grad_input, schema_str, "gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)")

// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gelu_backward_grad_input::schema> create_gelu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu_backward_grad_input::name, gelu_backward_grad_input::overload_name)
      .typed<gelu_backward_grad_input::schema>();
}

// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & gelu_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {

    static auto op = create_gelu_backward_grad_input_typed_handle();
    return op.call(grad_output, self, approximate, grad_input);
}

// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & gelu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {

    static auto op = create_gelu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, approximate, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_backward, name, "aten::gelu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_backward, schema_str, "gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor")

// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gelu_backward::schema> create_gelu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu_backward::name, gelu_backward::overload_name)
      .typed<gelu_backward::schema>();
}

// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
at::Tensor gelu_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {

    static auto op = create_gelu_backward_typed_handle();
    return op.call(grad_output, self, approximate);
}

// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
at::Tensor gelu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {

    static auto op = create_gelu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, approximate);
}

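// Hand-written background note: with approximate='none', gelu(x) = x * Phi(x)
// where Phi is the standard normal CDF, so gelu_backward computes
//   grad_input = grad_output * (Phi(x) + x * phi(x)),
// with phi(x) = exp(-x^2 / 2) / sqrt(2 * pi) the normal PDF; the 'tanh' variant
// differentiates the tanh-based approximation instead. A minimal call sketch
// (`example_gelu_grad` is hypothetical):
[[maybe_unused]] static at::Tensor example_gelu_grad(const at::Tensor & grad_out, const at::Tensor & x) {
  // With grad_out filled with ones, the result is gelu'(x) elementwise.
  return gelu_backward::call(grad_out, x, "none");
}
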
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(selu, name, "aten::selu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(selu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(selu, schema_str, "selu(Tensor self) -> Tensor")

// aten::selu(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<selu::schema> create_selu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(selu::name, selu::overload_name)
      .typed<selu::schema>();
}

// aten::selu(Tensor self) -> Tensor
at::Tensor selu::call(const at::Tensor & self) {

    static auto op = create_selu_typed_handle();
    return op.call(self);
}

// aten::selu(Tensor self) -> Tensor
at::Tensor selu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_selu_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(selu_, name, "aten::selu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(selu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(selu_, schema_str, "selu_(Tensor(a!) self) -> Tensor(a!)")

// aten::selu_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<selu_::schema> create_selu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(selu_::name, selu_::overload_name)
      .typed<selu_::schema>();
}

// aten::selu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & selu_::call(at::Tensor & self) {

    static auto op = create_selu__typed_handle();
    return op.call(self);
}

// aten::selu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & selu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_selu__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_backward_grad_input, name, "aten::silu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_backward_grad_input, schema_str, "silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<silu_backward_grad_input::schema> create_silu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu_backward_grad_input::name, silu_backward_grad_input::overload_name)
      .typed<silu_backward_grad_input::schema>();
}

// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & silu_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {

    static auto op = create_silu_backward_grad_input_typed_handle();
    return op.call(grad_output, self, grad_input);
}

// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & silu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {

    static auto op = create_silu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_backward, name, "aten::silu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_backward, schema_str, "silu_backward(Tensor grad_output, Tensor self) -> Tensor")

// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<silu_backward::schema> create_silu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu_backward::name, silu_backward::overload_name)
      .typed<silu_backward::schema>();
}

// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor silu_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {

    static auto op = create_silu_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor silu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {

    static auto op = create_silu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

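// Hand-written background note: silu(x) = x * sigmoid(x), so
//   silu'(x) = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
// and silu_backward returns grad_output * silu'(x). A minimal call sketch
// (`example_silu_grad` is hypothetical):
[[maybe_unused]] static at::Tensor example_silu_grad(const at::Tensor & grad_out, const at::Tensor & x) {
  return silu_backward::call(grad_out, x);
}
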
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin, name, "aten::sin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin, schema_str, "sin(Tensor self) -> Tensor")

// aten::sin(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sin::schema> create_sin_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sin::name, sin::overload_name)
      .typed<sin::schema>();
}

// aten::sin(Tensor self) -> Tensor
at::Tensor sin::call(const at::Tensor & self) {

    static auto op = create_sin_typed_handle();
    return op.call(self);
}

// aten::sin(Tensor self) -> Tensor
at::Tensor sin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_sin_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin_, name, "aten::sin_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin_, schema_str, "sin_(Tensor(a!) self) -> Tensor(a!)")

// aten::sin_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sin_::schema> create_sin__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sin_::name, sin_::overload_name)
      .typed<sin_::schema>();
}

// aten::sin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sin_::call(at::Tensor & self) {

    static auto op = create_sin__typed_handle();
    return op.call(self);
}

// aten::sin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_sin__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin_out, name, "aten::sin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sin_out, schema_str, "sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sin_out::schema> create_sin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sin_out::name, sin_out::overload_name)
      .typed<sin_out::schema>();
}

// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sin_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sin_out_typed_handle();
    return op.call(self, out);
}

// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_scatter, name, "aten::diagonal_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_scatter, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_scatter, schema_str, "diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor")

// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_scatter::schema> create_diagonal_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_scatter::name, diagonal_scatter::overload_name)
      .typed<diagonal_scatter::schema>();
}

// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
at::Tensor diagonal_scatter::call(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {

    static auto op = create_diagonal_scatter_typed_handle();
    return op.call(self, src, offset, dim1, dim2);
}

// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
at::Tensor diagonal_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {

    static auto op = create_diagonal_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, offset, dim1, dim2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_scatter, name, "aten::as_strided_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_scatter, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_scatter, schema_str, "as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor")

// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_scatter::schema> create_as_strided_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_scatter::name, as_strided_scatter::overload_name)
      .typed<as_strided_scatter::schema>();
}

// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
at::Tensor as_strided_scatter::call(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {

    static auto op = create_as_strided_scatter_typed_handle();
    return op.call(self, src, size, stride, storage_offset);
}

// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
at::Tensor as_strided_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {

    static auto op = create_as_strided_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, size, stride, storage_offset);
}

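// Hand-written sketch: the *_scatter ops above are the functional counterparts
// of their view ops -- each returns a copy of `self` with the region the view
// would select overwritten by `src`, leaving the input untouched.
// `example_diag_scatter` is a hypothetical helper.
[[maybe_unused]] static at::Tensor example_diag_scatter(const at::Tensor & self, const at::Tensor & diag) {
  // Behaves like: auto out = self.clone(); out.diagonal().copy_(diag); return out;
  return diagonal_scatter::call(self, diag, /*offset=*/0, /*dim1=*/0, /*dim2=*/1);
}
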
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_Tensor, name, "aten::split")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_Tensor, schema_str, "split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]")

// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<split_Tensor::schema> create_split_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_Tensor::name, split_Tensor::overload_name)
      .typed<split_Tensor::schema>();
}

// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_Tensor::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {

    static auto op = create_split_Tensor_typed_handle();
    return op.call(self, split_size, dim);
}

// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {

    static auto op = create_split_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_sizes, name, "aten::split")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_sizes, overload_name, "sizes")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_sizes, schema_str, "split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]")

// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<split_sizes::schema> create_split_sizes_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_sizes::name, split_sizes::overload_name)
      .typed<split_sizes::schema>();
}

// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_sizes::call(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {

    static auto op = create_split_sizes_typed_handle();
    return op.call(self, split_size, dim);
}

// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_sizes::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {

    static auto op = create_split_sizes_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim);
}

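// Hand-written sketch: split.Tensor takes a single chunk length and lets the
// trailing chunk be smaller, while split.sizes takes explicit per-chunk lengths
// that must sum to the dimension's size; both return views, per the Tensor(a)[]
// annotation. `example_split` is a hypothetical helper.
[[maybe_unused]] static ::std::vector<at::Tensor> example_split(const at::Tensor & self) {
  // On a dimension of length 5, chunks of size 2 come back as sizes {2, 2, 1}.
  return split_Tensor::call(self, c10::SymInt(2), /*dim=*/0);
}
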
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze, name, "aten::squeeze")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze, schema_str, "squeeze(Tensor(a) self) -> Tensor(a)")

// aten::squeeze(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze::schema> create_squeeze_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze::name, squeeze::overload_name)
      .typed<squeeze::schema>();
}

// aten::squeeze(Tensor(a) self) -> Tensor(a)
at::Tensor squeeze::call(const at::Tensor & self) {

    static auto op = create_squeeze_typed_handle();
    return op.call(self);
}

// aten::squeeze(Tensor(a) self) -> Tensor(a)
at::Tensor squeeze::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_squeeze_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dim, name, "aten::squeeze")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dim, schema_str, "squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)")

// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_dim::schema> create_squeeze_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_dim::name, squeeze_dim::overload_name)
      .typed<squeeze_dim::schema>();
}

// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
at::Tensor squeeze_dim::call(const at::Tensor & self, int64_t dim) {

    static auto op = create_squeeze_dim_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
at::Tensor squeeze_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create_squeeze_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dimname, name, "aten::squeeze")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dimname, schema_str, "squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)")

// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_dimname::schema> create_squeeze_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_dimname::name, squeeze_dimname::overload_name)
      .typed<squeeze_dimname::schema>();
}

// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
at::Tensor squeeze_dimname::call(const at::Tensor & self, at::Dimname dim) {

    static auto op = create_squeeze_dimname_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
at::Tensor squeeze_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {

    static auto op = create_squeeze_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dims, name, "aten::squeeze")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dims, overload_name, "dims")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_dims, schema_str, "squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)")

// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_dims::schema> create_squeeze_dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_dims::name, squeeze_dims::overload_name)
      .typed<squeeze_dims::schema>();
}

// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
at::Tensor squeeze_dims::call(const at::Tensor & self, at::IntArrayRef dim) {

    static auto op = create_squeeze_dims_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
at::Tensor squeeze_dims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {

    static auto op = create_squeeze_dims_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

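// Hand-written sketch: squeeze() drops every size-1 dimension, squeeze.dim
// drops the given dimension only when its size is 1 (otherwise the input is
// returned unchanged), and squeeze.dims generalizes that to a set of
// dimensions; e.g. shape [2, 1, 3, 1] squeezes to [2, 3]. The squeeze_ variants
// below apply the same logic in place. `example_squeeze` is a hypothetical
// helper.
[[maybe_unused]] static at::Tensor example_squeeze(const at::Tensor & self) {
  // A metadata-only no-op when self.size(1) != 1.
  return squeeze_dim::call(self, /*dim=*/1);
}
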
6000STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_, name, "aten::squeeze_")
6001STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_, overload_name, "")
6002STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_, schema_str, "squeeze_(Tensor(a!) self) -> Tensor(a!)")
6003
6004// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
6005static C10_NOINLINE c10::TypedOperatorHandle<squeeze_::schema> create_squeeze__typed_handle() {
6006 return c10::Dispatcher::singleton()
6007 .findSchemaOrThrow(squeeze_::name, squeeze_::overload_name)
6008 .typed<squeeze_::schema>();
6009}
6010
6011// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
6012at::Tensor & squeeze_::call(at::Tensor & self) {
6013
6014 static auto op = create_squeeze__typed_handle();
6015 return op.call(self);
6016}
6017
6018// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
6019at::Tensor & squeeze_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
6020
6021 static auto op = create_squeeze__typed_handle();
6022 return op.redispatch(dispatchKeySet, self);
6023}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dim, name, "aten::squeeze_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dim, schema_str, "squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)")

// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze__dim::schema> create_squeeze__dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze__dim::name, squeeze__dim::overload_name)
      .typed<squeeze__dim::schema>();
}

// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
at::Tensor & squeeze__dim::call(at::Tensor & self, int64_t dim) {

    static auto op = create_squeeze__dim_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
at::Tensor & squeeze__dim::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) {

    static auto op = create_squeeze__dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dims, name, "aten::squeeze_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dims, overload_name, "dims")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dims, schema_str, "squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)")

// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze__dims::schema> create_squeeze__dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze__dims::name, squeeze__dims::overload_name)
      .typed<squeeze__dims::schema>();
}

// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
at::Tensor & squeeze__dims::call(at::Tensor & self, at::IntArrayRef dim) {

    static auto op = create_squeeze__dims_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
at::Tensor & squeeze__dims::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::IntArrayRef dim) {

    static auto op = create_squeeze__dims_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dimname, name, "aten::squeeze_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze__dimname, schema_str, "squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)")

// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze__dimname::schema> create_squeeze__dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze__dimname::name, squeeze__dimname::overload_name)
      .typed<squeeze__dimname::schema>();
}

// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
at::Tensor & squeeze__dimname::call(at::Tensor & self, at::Dimname dim) {

    static auto op = create_squeeze__dimname_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
at::Tensor & squeeze__dimname::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim) {

    static auto op = create_squeeze__dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sspaddmm, name, "aten::sspaddmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sspaddmm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sspaddmm, schema_str, "sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")

// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sspaddmm::schema> create_sspaddmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sspaddmm::name, sspaddmm::overload_name)
      .typed<sspaddmm::schema>();
}

// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor sspaddmm::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_sspaddmm_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor sspaddmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_sspaddmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sspaddmm_out, name, "aten::sspaddmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sspaddmm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sspaddmm_out, schema_str, "sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sspaddmm_out::schema> create_sspaddmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sspaddmm_out::name, sspaddmm_out::overload_name)
      .typed<sspaddmm_out::schema>();
}

// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sspaddmm_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_sspaddmm_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, out);
}

// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sspaddmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_sspaddmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stride_int, name, "aten::stride")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stride_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stride_int, schema_str, "stride.int(Tensor self, int dim) -> int")

// aten::stride.int(Tensor self, int dim) -> int
static C10_NOINLINE c10::TypedOperatorHandle<stride_int::schema> create_stride_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(stride_int::name, stride_int::overload_name)
      .typed<stride_int::schema>();
}

// aten::stride.int(Tensor self, int dim) -> int
int64_t stride_int::call(const at::Tensor & self, int64_t dim) {

    static auto op = create_stride_int_typed_handle();
    return op.call(self, dim);
}

// aten::stride.int(Tensor self, int dim) -> int
int64_t stride_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create_stride_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stride_Dimname, name, "aten::stride")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stride_Dimname, overload_name, "Dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stride_Dimname, schema_str, "stride.Dimname(Tensor self, Dimname dim) -> int")

// aten::stride.Dimname(Tensor self, Dimname dim) -> int
static C10_NOINLINE c10::TypedOperatorHandle<stride_Dimname::schema> create_stride_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(stride_Dimname::name, stride_Dimname::overload_name)
      .typed<stride_Dimname::schema>();
}

// aten::stride.Dimname(Tensor self, Dimname dim) -> int
int64_t stride_Dimname::call(const at::Tensor & self, at::Dimname dim) {

    static auto op = create_stride_Dimname_typed_handle();
    return op.call(self, dim);
}

// aten::stride.Dimname(Tensor self, Dimname dim) -> int
int64_t stride_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {

    static auto op = create_stride_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_backward_grad_input, name, "aten::threshold_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_backward_grad_input, schema_str, "threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<threshold_backward_grad_input::schema> create_threshold_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(threshold_backward_grad_input::name, threshold_backward_grad_input::overload_name)
      .typed<threshold_backward_grad_input::schema>();
}

// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & threshold_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {

    static auto op = create_threshold_backward_grad_input_typed_handle();
    return op.call(grad_output, self, threshold, grad_input);
}

// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & threshold_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {

    static auto op = create_threshold_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, threshold, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_backward, name, "aten::threshold_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_backward, schema_str, "threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor")

// aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<threshold_backward::schema> create_threshold_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(threshold_backward::name, threshold_backward::overload_name)
      .typed<threshold_backward::schema>();
}

// aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
at::Tensor threshold_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {

    static auto op = create_threshold_backward_typed_handle();
    return op.call(grad_output, self, threshold);
}

// aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
at::Tensor threshold_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {

    static auto op = create_threshold_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, threshold);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(one_hot, name, "aten::one_hot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(one_hot, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(one_hot, schema_str, "one_hot(Tensor self, int num_classes=-1) -> Tensor")

// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<one_hot::schema> create_one_hot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(one_hot::name, one_hot::overload_name)
      .typed<one_hot::schema>();
}

// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
at::Tensor one_hot::call(const at::Tensor & self, int64_t num_classes) {

    static auto op = create_one_hot_typed_handle();
    return op.call(self, num_classes);
}

// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
at::Tensor one_hot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_classes) {

    static auto op = create_one_hot_typed_handle();
    return op.redispatch(dispatchKeySet, self, num_classes);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transform_bias_rescale_qkv, name, "aten::_transform_bias_rescale_qkv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transform_bias_rescale_qkv, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transform_bias_rescale_qkv, schema_str, "_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)")

// aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_transform_bias_rescale_qkv::schema> create__transform_bias_rescale_qkv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_transform_bias_rescale_qkv::name, _transform_bias_rescale_qkv::overload_name)
      .typed<_transform_bias_rescale_qkv::schema>();
}

// aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv::call(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {

    static auto op = create__transform_bias_rescale_qkv_typed_handle();
    return op.call(qkv, qkv_bias, num_heads);
}

// aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {

    static auto op = create__transform_bias_rescale_qkv_typed_handle();
    return op.redispatch(dispatchKeySet, qkv, qkv_bias, num_heads);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique, name, "aten::_unique")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique, schema_str, "_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)")

// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_unique::schema> create__unique_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unique::name, _unique::overload_name)
      .typed<_unique::schema>();
}

// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _unique::call(const at::Tensor & self, bool sorted, bool return_inverse) {

    static auto op = create__unique_typed_handle();
    return op.call(self, sorted, return_inverse);
}

// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _unique::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse) {

    static auto op = create__unique_typed_handle();
    return op.redispatch(dispatchKeySet, self, sorted, return_inverse);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_self, name, "aten::where")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_self, overload_name, "self")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_self, schema_str, "where.self(Tensor condition, Tensor self, Tensor other) -> Tensor")

// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<where_self::schema> create_where_self_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(where_self::name, where_self::overload_name)
      .typed<where_self::schema>();
}

// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
at::Tensor where_self::call(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_where_self_typed_handle();
    return op.call(condition, self, other);
}

// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
at::Tensor where_self::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_where_self_typed_handle();
    return op.redispatch(dispatchKeySet, condition, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_self_out, name, "aten::where")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_self_out, overload_name, "self_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_self_out, schema_str, "where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<where_self_out::schema> create_where_self_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(where_self_out::name, where_self_out::overload_name)
      .typed<where_self_out::schema>();
}

// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & where_self_out::call(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_where_self_out_typed_handle();
    return op.call(condition, self, other, out);
}

// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & where_self_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_where_self_out_typed_handle();
    return op.redispatch(dispatchKeySet, condition, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_ScalarSelf, name, "aten::where")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_ScalarSelf, overload_name, "ScalarSelf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_ScalarSelf, schema_str, "where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor")

// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<where_ScalarSelf::schema> create_where_ScalarSelf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(where_ScalarSelf::name, where_ScalarSelf::overload_name)
      .typed<where_ScalarSelf::schema>();
}

// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
at::Tensor where_ScalarSelf::call(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {

    static auto op = create_where_ScalarSelf_typed_handle();
    return op.call(condition, self, other);
}

// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
at::Tensor where_ScalarSelf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {

    static auto op = create_where_ScalarSelf_typed_handle();
    return op.redispatch(dispatchKeySet, condition, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_ScalarOther, name, "aten::where")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_ScalarOther, overload_name, "ScalarOther")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_ScalarOther, schema_str, "where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor")

// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<where_ScalarOther::schema> create_where_ScalarOther_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(where_ScalarOther::name, where_ScalarOther::overload_name)
      .typed<where_ScalarOther::schema>();
}

// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
at::Tensor where_ScalarOther::call(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_where_ScalarOther_typed_handle();
    return op.call(condition, self, other);
}

// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
at::Tensor where_ScalarOther::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_where_ScalarOther_typed_handle();
    return op.redispatch(dispatchKeySet, condition, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_Scalar, name, "aten::where")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where_Scalar, schema_str, "where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor")

// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<where_Scalar::schema> create_where_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(where_Scalar::name, where_Scalar::overload_name)
      .typed<where_Scalar::schema>();
}

// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
at::Tensor where_Scalar::call(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {

    static auto op = create_where_Scalar_typed_handle();
    return op.call(condition, self, other);
}

// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
at::Tensor where_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {

    static auto op = create_where_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, condition, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where, name, "aten::where")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(where, schema_str, "where(Tensor condition) -> Tensor[]")

// aten::where(Tensor condition) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<where::schema> create_where_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(where::name, where::overload_name)
      .typed<where::schema>();
}

// aten::where(Tensor condition) -> Tensor[]
::std::vector<at::Tensor> where::call(const at::Tensor & condition) {

    static auto op = create_where_typed_handle();
    return op.call(condition);
}

// aten::where(Tensor condition) -> Tensor[]
::std::vector<at::Tensor> where::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition) {

    static auto op = create_where_typed_handle();
    return op.redispatch(dispatchKeySet, condition);
}
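
// NOTE: redispatch() is the re-entry point used below the first dispatch: a
// kernel that has already run for one dispatch key can mask that key out and
// hand the call back to the dispatcher. A sketch of the idiom (illustrative
// only; the wrapper name and the key chosen are hypothetical, not part of
// this file):
//
//   at::Tensor & my_where_out_wrapper(const at::Tensor & cond, const at::Tensor & a,
//                                     const at::Tensor & b, at::Tensor & out) {
//     // Keep only the keys strictly after AutogradCPU, then re-dispatch.
//     auto ks = c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER,
//                                   c10::DispatchKey::AutogradCPU);
//     return where_self_out::redispatch(ks, cond, a, b, out);
//   }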

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm, name, "aten::_weight_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm, schema_str, "_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor")

// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm::schema> create__weight_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_weight_norm::name, _weight_norm::overload_name)
      .typed<_weight_norm::schema>();
}

// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
at::Tensor _weight_norm::call(const at::Tensor & v, const at::Tensor & g, int64_t dim) {

    static auto op = create__weight_norm_typed_handle();
    return op.call(v, g, dim);
}

// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
at::Tensor _weight_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim) {

    static auto op = create__weight_norm_typed_handle();
    return op.redispatch(dispatchKeySet, v, g, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface, name, "aten::_weight_norm_interface")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface, schema_str, "_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)")

// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_interface::schema> create__weight_norm_interface_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_weight_norm_interface::name, _weight_norm_interface::overload_name)
      .typed<_weight_norm_interface::schema>();
}

// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface::call(const at::Tensor & v, const at::Tensor & g, int64_t dim) {

    static auto op = create__weight_norm_interface_typed_handle();
    return op.call(v, g, dim);
}

// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim) {

    static auto op = create__weight_norm_interface_typed_handle();
    return op.redispatch(dispatchKeySet, v, g, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_differentiable_backward, name, "aten::_weight_norm_differentiable_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_differentiable_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_differentiable_backward, schema_str, "_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)")

// aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_differentiable_backward::schema> create__weight_norm_differentiable_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_weight_norm_differentiable_backward::name, _weight_norm_differentiable_backward::overload_name)
      .typed<_weight_norm_differentiable_backward::schema>();
}

// aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward::call(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {

    static auto op = create__weight_norm_differentiable_backward_typed_handle();
    return op.call(grad_w, saved_v, saved_g, saved_norms, dim);
}

// aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {

    static auto op = create__weight_norm_differentiable_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_names, name, "aten::zeros")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_names, overload_name, "names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_names, schema_str, "zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<zeros_names::schema> create_zeros_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zeros_names::name, zeros_names::overload_name)
      .typed<zeros_names::schema>();
}

// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor zeros_names::call(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_zeros_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory);
}

// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor zeros_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_zeros_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros, name, "aten::zeros")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros, schema_str, "zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<zeros::schema> create_zeros_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zeros::name, zeros::overload_name)
      .typed<zeros::schema>();
}

// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor zeros::call(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_zeros_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor zeros::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_zeros_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_out, name, "aten::zeros")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_out, schema_str, "zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<zeros_out::schema> create_zeros_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zeros_out::name, zeros_out::overload_name)
      .typed<zeros_out::schema>();
}

// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_out::call(c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create_zeros_out_typed_handle();
    return op.call(size, out);
}

// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create_zeros_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}
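
// NOTE: Factory overloads such as zeros take the pieces of a TensorOptions
// (dtype, layout, device, pin_memory) as separate optionals at this layer;
// the public at:: wrappers unpack a bundled TensorOptions into them. A
// sketch (illustrative only):
//
//   at::Tensor z = at::zeros({2, 3}, at::TensorOptions().dtype(at::kFloat));
//   // ...reaches zeros::call with dtype=float and layout/device/pin_memory
//   // left as c10::nullopt, so backend defaults apply.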

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_standard_gamma, name, "aten::_standard_gamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_standard_gamma, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_standard_gamma, schema_str, "_standard_gamma(Tensor self, Generator? generator=None) -> Tensor")

// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_standard_gamma::schema> create__standard_gamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_standard_gamma::name, _standard_gamma::overload_name)
      .typed<_standard_gamma::schema>();
}

// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
at::Tensor _standard_gamma::call(const at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create__standard_gamma_typed_handle();
    return op.call(self, generator);
}

// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
at::Tensor _standard_gamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create__standard_gamma_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sample_dirichlet, name, "aten::_sample_dirichlet")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sample_dirichlet, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sample_dirichlet, schema_str, "_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor")

// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sample_dirichlet::schema> create__sample_dirichlet_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sample_dirichlet::name, _sample_dirichlet::overload_name)
      .typed<_sample_dirichlet::schema>();
}

// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
at::Tensor _sample_dirichlet::call(const at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create__sample_dirichlet_typed_handle();
    return op.call(self, generator);
}

// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
at::Tensor _sample_dirichlet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create__sample_dirichlet_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binomial, name, "aten::binomial")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binomial, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binomial, schema_str, "binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor")

// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<binomial::schema> create_binomial_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(binomial::name, binomial::overload_name)
      .typed<binomial::schema>();
}

// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
at::Tensor binomial::call(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {

    static auto op = create_binomial_typed_handle();
    return op.call(count, prob, generator);
}

// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
at::Tensor binomial::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {

    static auto op = create_binomial_typed_handle();
    return op.redispatch(dispatchKeySet, count, prob, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum, name, "aten::_sparse_sum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum, schema_str, "_sparse_sum(Tensor self) -> Tensor")

// aten::_sparse_sum(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum::schema> create__sparse_sum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_sum::name, _sparse_sum::overload_name)
      .typed<_sparse_sum::schema>();
}

// aten::_sparse_sum(Tensor self) -> Tensor
at::Tensor _sparse_sum::call(const at::Tensor & self) {

    static auto op = create__sparse_sum_typed_handle();
    return op.call(self);
}

// aten::_sparse_sum(Tensor self) -> Tensor
at::Tensor _sparse_sum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__sparse_sum_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dtype, name, "aten::_sparse_sum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dtype, overload_name, "dtype")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dtype, schema_str, "_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor")

// aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_dtype::schema> create__sparse_sum_dtype_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_sum_dtype::name, _sparse_sum_dtype::overload_name)
      .typed<_sparse_sum_dtype::schema>();
}

// aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
at::Tensor _sparse_sum_dtype::call(const at::Tensor & self, at::ScalarType dtype) {

    static auto op = create__sparse_sum_dtype_typed_handle();
    return op.call(self, dtype);
}

// aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
at::Tensor _sparse_sum_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {

    static auto op = create__sparse_sum_dtype_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim, name, "aten::_sparse_sum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim, schema_str, "_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor")

// aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_dim::schema> create__sparse_sum_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_sum_dim::name, _sparse_sum_dim::overload_name)
      .typed<_sparse_sum_dim::schema>();
}

// aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
at::Tensor _sparse_sum_dim::call(const at::Tensor & self, at::IntArrayRef dim) {

    static auto op = create__sparse_sum_dim_typed_handle();
    return op.call(self, dim);
}

// aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
at::Tensor _sparse_sum_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {

    static auto op = create__sparse_sum_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim_dtype, name, "aten::_sparse_sum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim_dtype, overload_name, "dim_dtype")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim_dtype, schema_str, "_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor")

// aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_dim_dtype::schema> create__sparse_sum_dim_dtype_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_sum_dim_dtype::name, _sparse_sum_dim_dtype::overload_name)
      .typed<_sparse_sum_dim_dtype::schema>();
}

// aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
at::Tensor _sparse_sum_dim_dtype::call(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {

    static auto op = create__sparse_sum_dim_dtype_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
at::Tensor _sparse_sum_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {

    static auto op = create__sparse_sum_dim_dtype_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_addmm, name, "aten::_sparse_addmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_addmm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_addmm, schema_str, "_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")

// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_addmm::schema> create__sparse_addmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_addmm::name, _sparse_addmm::overload_name)
      .typed<_sparse_addmm::schema>();
}

// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor _sparse_addmm::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create__sparse_addmm_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor _sparse_addmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create__sparse_addmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_mm_reduce_impl_backward, name, "aten::_sparse_mm_reduce_impl_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_mm_reduce_impl_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_mm_reduce_impl_backward, schema_str, "_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)")

// aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mm_reduce_impl_backward::schema> create__sparse_mm_reduce_impl_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mm_reduce_impl_backward::name, _sparse_mm_reduce_impl_backward::overload_name)
      .typed<_sparse_mm_reduce_impl_backward::schema>();
}

// aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward::call(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {

    static auto op = create__sparse_mm_reduce_impl_backward_typed_handle();
    return op.call(self, grad_out, weight, reduce, arg_out, output_mask);
}

// aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {

    static auto op = create__sparse_mm_reduce_impl_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_out, weight, reduce, arg_out, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm_out, name, "aten::addmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm_out, schema_str, "addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addmm_out::schema> create_addmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmm_out::name, addmm_out::overload_name)
      .typed<addmm_out::schema>();
}

// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addmm_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_addmm_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, out);
}

// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_addmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm, name, "aten::addmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm, schema_str, "addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")

// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<addmm::schema> create_addmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmm::name, addmm::overload_name)
      .typed<addmm::schema>();
}

// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addmm::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_addmm_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_addmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm_, name, "aten::addmm_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmm_, schema_str, "addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)")

// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addmm_::schema> create_addmm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmm_::name, addmm_::overload_name)
      .typed<addmm_::schema>();
}

// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addmm_::call(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_addmm__typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addmm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_addmm__typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}
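
// NOTE: addmm and its variants all evaluate out = beta * self + alpha * (mat1 @ mat2);
// they differ only in where the result goes: addmm allocates a fresh tensor,
// addmm.out writes into a caller-provided `out`, and addmm_ mutates `self`
// in place (hence the at::Tensor & return and the (a!) alias annotation).
// A sketch (illustrative only):
//
//   at::Tensor self = at::rand({2, 4});
//   at::Tensor m1 = at::rand({2, 3});
//   at::Tensor m2 = at::rand({3, 4});
//   at::Tensor y = at::addmm(self, m1, m2, /*beta=*/1.0, /*alpha=*/2.0);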
6924
6925STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csc_tensor_ccol_row_value_size, name, "aten::sparse_csc_tensor")
6926STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csc_tensor_ccol_row_value_size, overload_name, "ccol_row_value_size")
6927STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csc_tensor_ccol_row_value_size, schema_str, "sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")
6928
6929// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
6930static C10_NOINLINE c10::TypedOperatorHandle<sparse_csc_tensor_ccol_row_value_size::schema> create_sparse_csc_tensor_ccol_row_value_size_typed_handle() {
6931 return c10::Dispatcher::singleton()
6932 .findSchemaOrThrow(sparse_csc_tensor_ccol_row_value_size::name, sparse_csc_tensor_ccol_row_value_size::overload_name)
6933 .typed<sparse_csc_tensor_ccol_row_value_size::schema>();
6934}
6935
6936// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
6937at::Tensor sparse_csc_tensor_ccol_row_value_size::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6938
6939 static auto op = create_sparse_csc_tensor_ccol_row_value_size_typed_handle();
6940 return op.call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
6941}
6942
6943// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
6944at::Tensor sparse_csc_tensor_ccol_row_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6945
6946 static auto op = create_sparse_csc_tensor_ccol_row_value_size_typed_handle();
6947 return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
6948}
6949
6950STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsc_tensor_ccol_row_value_size, name, "aten::sparse_bsc_tensor")
6951STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsc_tensor_ccol_row_value_size, overload_name, "ccol_row_value_size")
6952STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsc_tensor_ccol_row_value_size, schema_str, "sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")
6953
6954// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
6955static C10_NOINLINE c10::TypedOperatorHandle<sparse_bsc_tensor_ccol_row_value_size::schema> create_sparse_bsc_tensor_ccol_row_value_size_typed_handle() {
6956 return c10::Dispatcher::singleton()
6957 .findSchemaOrThrow(sparse_bsc_tensor_ccol_row_value_size::name, sparse_bsc_tensor_ccol_row_value_size::overload_name)
6958 .typed<sparse_bsc_tensor_ccol_row_value_size::schema>();
6959}
6960
6961// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
6962at::Tensor sparse_bsc_tensor_ccol_row_value_size::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6963
6964 static auto op = create_sparse_bsc_tensor_ccol_row_value_size_typed_handle();
6965 return op.call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
6966}
6967
6968// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
6969at::Tensor sparse_bsc_tensor_ccol_row_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6970
6971 static auto op = create_sparse_bsc_tensor_ccol_row_value_size_typed_handle();
6972 return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
6973}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csc_tensor_ccol_row_value, name, "aten::sparse_csc_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csc_tensor_ccol_row_value, overload_name, "ccol_row_value")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csc_tensor_ccol_row_value, schema_str, "sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_csc_tensor_ccol_row_value::schema> create_sparse_csc_tensor_ccol_row_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_csc_tensor_ccol_row_value::name, sparse_csc_tensor_ccol_row_value::overload_name)
      .typed<sparse_csc_tensor_ccol_row_value::schema>();
}

// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_csc_tensor_ccol_row_value::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_csc_tensor_ccol_row_value_typed_handle();
    return op.call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_csc_tensor_ccol_row_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_csc_tensor_ccol_row_value_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsc_tensor_ccol_row_value, name, "aten::sparse_bsc_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsc_tensor_ccol_row_value, overload_name, "ccol_row_value")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_bsc_tensor_ccol_row_value, schema_str, "sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_bsc_tensor_ccol_row_value::schema> create_sparse_bsc_tensor_ccol_row_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_bsc_tensor_ccol_row_value::name, sparse_bsc_tensor_ccol_row_value::overload_name)
      .typed<sparse_bsc_tensor_ccol_row_value::schema>();
}

// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_bsc_tensor_ccol_row_value::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_bsc_tensor_ccol_row_value_typed_handle();
    return op.call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_bsc_tensor_ccol_row_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_sparse_bsc_tensor_ccol_row_value_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_compressed_tensor_unsafe, name, "aten::_sparse_compressed_tensor_unsafe")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_compressed_tensor_unsafe, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_compressed_tensor_unsafe, schema_str, "_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_compressed_tensor_unsafe::schema> create__sparse_compressed_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_compressed_tensor_unsafe::name, _sparse_compressed_tensor_unsafe::overload_name)
      .typed<_sparse_compressed_tensor_unsafe::schema>();
}

// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_compressed_tensor_unsafe::call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create__sparse_compressed_tensor_unsafe_typed_handle();
    return op.call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_compressed_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create__sparse_compressed_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_tensor_unsafe, name, "aten::_sparse_csr_tensor_unsafe")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_tensor_unsafe, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_tensor_unsafe, schema_str, "_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_tensor_unsafe::schema> create__sparse_csr_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_csr_tensor_unsafe::name, _sparse_csr_tensor_unsafe::overload_name)
      .typed<_sparse_csr_tensor_unsafe::schema>();
}

// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_csr_tensor_unsafe::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create__sparse_csr_tensor_unsafe_typed_handle();
    return op.call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_csr_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create__sparse_csr_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_unsafe, name, "aten::_sparse_coo_tensor_unsafe")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_unsafe, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_unsafe, schema_str, "_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_unsafe::schema> create__sparse_coo_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_coo_tensor_unsafe::name, _sparse_coo_tensor_unsafe::overload_name)
      .typed<_sparse_coo_tensor_unsafe::schema>();
}

// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_coo_tensor_unsafe::call(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create__sparse_coo_tensor_unsafe_typed_handle();
    return op.call(indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_coo_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create__sparse_coo_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, indices, values, size, dtype, layout, device, pin_memory);
}
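
// _sparse_coo_tensor_unsafe takes SymInt[] rather than int[], so symbolic
// shapes from the tracing frontend can flow through unchanged. A hedged
// sketch (not generated; assumes <vector> is available transitively)
// wrapping concrete sizes:
[[maybe_unused]] static at::Tensor example_sparse_coo_unsafe(
    const at::Tensor & indices, const at::Tensor & values) {
  // Concrete integers wrap into SymInt; under symbolic tracing these would
  // carry shape expressions instead.
  std::vector<c10::SymInt> size = {c10::SymInt(2), c10::SymInt(3)};
  return _sparse_coo_tensor_unsafe::call(indices, values, size,
                                         c10::nullopt, c10::nullopt,
                                         c10::nullopt, c10::nullopt);
}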

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_csr_tensor_args, name, "aten::_validate_sparse_csr_tensor_args")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_csr_tensor_args, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_csr_tensor_args, schema_str, "_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()")

// aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_csr_tensor_args::schema> create__validate_sparse_csr_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_csr_tensor_args::name, _validate_sparse_csr_tensor_args::overload_name)
      .typed<_validate_sparse_csr_tensor_args::schema>();
}

// aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
void _validate_sparse_csr_tensor_args::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {

    static auto op = create__validate_sparse_csr_tensor_args_typed_handle();
    return op.call(crow_indices, col_indices, values, size);
}

// aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
void _validate_sparse_csr_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {

    static auto op = create__validate_sparse_csr_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_bsr_tensor_args, name, "aten::_validate_sparse_bsr_tensor_args")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_bsr_tensor_args, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_bsr_tensor_args, schema_str, "_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()")

// aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_bsr_tensor_args::schema> create__validate_sparse_bsr_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_bsr_tensor_args::name, _validate_sparse_bsr_tensor_args::overload_name)
      .typed<_validate_sparse_bsr_tensor_args::schema>();
}

// aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
void _validate_sparse_bsr_tensor_args::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {

    static auto op = create__validate_sparse_bsr_tensor_args_typed_handle();
    return op.call(crow_indices, col_indices, values, size);
}

// aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
void _validate_sparse_bsr_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {

    static auto op = create__validate_sparse_bsr_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_bsc_tensor_args, name, "aten::_validate_sparse_bsc_tensor_args")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_bsc_tensor_args, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_bsc_tensor_args, schema_str, "_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()")

// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_bsc_tensor_args::schema> create__validate_sparse_bsc_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_bsc_tensor_args::name, _validate_sparse_bsc_tensor_args::overload_name)
      .typed<_validate_sparse_bsc_tensor_args::schema>();
}

// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
void _validate_sparse_bsc_tensor_args::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {

    static auto op = create__validate_sparse_bsc_tensor_args_typed_handle();
    return op.call(ccol_indices, row_indices, values, size);
}

// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
void _validate_sparse_bsc_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {

    static auto op = create__validate_sparse_bsc_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size);
}
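
// The three _validate_sparse_*_tensor_args operators above share one shape:
// they return void ("-> ()" in the schema) and signal malformed components
// by throwing. A sketch for the CSR variant (illustrative only, not
// generated; the function name and 3x3 size are made up):
[[maybe_unused]] static void example_validate_csr(
    const at::Tensor & crow, const at::Tensor & col, const at::Tensor & vals) {
  // No return value: success is silent, failure raises a c10::Error.
  _validate_sparse_csr_tensor_args::call(crow, col, vals, {3, 3});
}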

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_, name, "aten::sparse_resize_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_, schema_str, "sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)")

// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_::schema> create_sparse_resize__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_::name, sparse_resize_::overload_name)
      .typed<sparse_resize_::schema>();
}

// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
const at::Tensor & sparse_resize_::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {

    static auto op = create_sparse_resize__typed_handle();
    return op.call(self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
const at::Tensor & sparse_resize_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {

    static auto op = create_sparse_resize__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_mask, name, "aten::sparse_mask")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_mask, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_mask, schema_str, "sparse_mask(Tensor self, Tensor mask) -> Tensor")

// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_mask::schema> create_sparse_mask_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_mask::name, sparse_mask::overload_name)
      .typed<sparse_mask::schema>();
}

// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
at::Tensor sparse_mask::call(const at::Tensor & self, const at::Tensor & mask) {

    static auto op = create_sparse_mask_typed_handle();
    return op.call(self, mask);
}

// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
at::Tensor sparse_mask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask) {

    static auto op = create_sparse_mask_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_cpu, name, "aten::_to_cpu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_cpu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_cpu, schema_str, "_to_cpu(Tensor[] tensors) -> Tensor[]")

// aten::_to_cpu(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_to_cpu::schema> create__to_cpu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_cpu::name, _to_cpu::overload_name)
      .typed<_to_cpu::schema>();
}

// aten::_to_cpu(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> _to_cpu::call(at::TensorList tensors) {

    static auto op = create__to_cpu_typed_handle();
    return op.call(tensors);
}

// aten::_to_cpu(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> _to_cpu::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {

    static auto op = create__to_cpu_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}
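
// _to_cpu maps Tensor[] to Tensor[]: at::TensorList is a non-owning
// ArrayRef, so a braced list of tensors binds to it for the duration of the
// call, while the result is an owning vector. Sketch (illustrative only,
// not generated; the function name is made up):
[[maybe_unused]] static ::std::vector<at::Tensor> example_to_cpu(
    const at::Tensor & a, const at::Tensor & b) {
  // The returned vector holds fresh CPU tensors; the inputs are untouched.
  return _to_cpu::call({a, b});
}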

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(values, name, "aten::values")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(values, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(values, schema_str, "values(Tensor(a) self) -> Tensor(a)")

// aten::values(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<values::schema> create_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(values::name, values::overload_name)
      .typed<values::schema>();
}

// aten::values(Tensor(a) self) -> Tensor(a)
at::Tensor values::call(const at::Tensor & self) {

    static auto op = create_values_typed_handle();
    return op.call(self);
}

// aten::values(Tensor(a) self) -> Tensor(a)
at::Tensor values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_values_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices, name, "aten::row_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices, schema_str, "row_indices(Tensor(a) self) -> Tensor(a)")

// aten::row_indices(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<row_indices::schema> create_row_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_indices::name, row_indices::overload_name)
      .typed<row_indices::schema>();
}

// aten::row_indices(Tensor(a) self) -> Tensor(a)
at::Tensor row_indices::call(const at::Tensor & self) {

    static auto op = create_row_indices_typed_handle();
    return op.call(self);
}

// aten::row_indices(Tensor(a) self) -> Tensor(a)
at::Tensor row_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_row_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse_, name, "aten::copy_sparse_to_sparse_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse_, schema_str, "copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)")

// aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copy_sparse_to_sparse_::schema> create_copy_sparse_to_sparse__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_sparse_to_sparse_::name, copy_sparse_to_sparse_::overload_name)
      .typed<copy_sparse_to_sparse_::schema>();
}

// aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
at::Tensor & copy_sparse_to_sparse_::call(at::Tensor & self, const at::Tensor & src, bool non_blocking) {

    static auto op = create_copy_sparse_to_sparse__typed_handle();
    return op.call(self, src, non_blocking);
}

// aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
at::Tensor & copy_sparse_to_sparse_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking) {

    static auto op = create_copy_sparse_to_sparse__typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking);
}
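
// In-place operators such as copy_sparse_to_sparse_ take self by mutable
// reference and return that same reference, matching the Tensor(a!) alias
// annotation in the schema string. Sketch (illustrative only, not
// generated; the function name is made up):
[[maybe_unused]] static at::Tensor & example_copy_sparse(
    at::Tensor & self, const at::Tensor & src) {
  // Returns self so calls can be chained, as the a! annotation promises.
  return copy_sparse_to_sparse_::call(self, src, /*non_blocking=*/false);
}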

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_int, name, "aten::unbind")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_int, schema_str, "unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]")

// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<unbind_int::schema> create_unbind_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unbind_int::name, unbind_int::overload_name)
      .typed<unbind_int::schema>();
}

// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> unbind_int::call(const at::Tensor & self, int64_t dim) {

    static auto op = create_unbind_int_typed_handle();
    return op.call(self, dim);
}

// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> unbind_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create_unbind_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_Dimname, name, "aten::unbind")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_Dimname, overload_name, "Dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unbind_Dimname, schema_str, "unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]")

// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<unbind_Dimname::schema> create_unbind_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unbind_Dimname::name, unbind_Dimname::overload_name)
      .typed<unbind_Dimname::schema>();
}

// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
::std::vector<at::Tensor> unbind_Dimname::call(const at::Tensor & self, at::Dimname dim) {

    static auto op = create_unbind_Dimname_typed_handle();
    return op.call(self, dim);
}

// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
::std::vector<at::Tensor> unbind_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {

    static auto op = create_unbind_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}
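
// unbind is registered twice under the same name "aten::unbind",
// disambiguated by overload_name ("int" vs "Dimname"); findSchemaOrThrow
// keys on the (name, overload_name) pair to pick the right schema. Sketch
// of the int overload (illustrative only, not generated):
[[maybe_unused]] static ::std::vector<at::Tensor> example_unbind_rows(const at::Tensor & self) {
  // Each returned tensor is a view of self along dim 0, per Tensor(a -> *).
  return unbind_int::call(self, /*dim=*/0);
}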

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_sparse_dim, name, "aten::to_sparse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_sparse_dim, overload_name, "sparse_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_sparse_dim, schema_str, "to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor")

// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_sparse_dim::schema> create_to_sparse_sparse_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_sparse_dim::name, to_sparse_sparse_dim::overload_name)
      .typed<to_sparse_sparse_dim::schema>();
}

// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
at::Tensor to_sparse_sparse_dim::call(const at::Tensor & self, int64_t sparse_dim) {

    static auto op = create_to_sparse_sparse_dim_typed_handle();
    return op.call(self, sparse_dim);
}

// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
at::Tensor to_sparse_sparse_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim) {

    static auto op = create_to_sparse_sparse_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, sparse_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse, name, "aten::to_sparse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse, schema_str, "to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor")

// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse::schema> create_to_sparse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse::name, to_sparse::overload_name)
      .typed<to_sparse::schema>();
}

// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
at::Tensor to_sparse::call(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {

    static auto op = create_to_sparse_typed_handle();
    return op.call(self, layout, blocksize, dense_dim);
}

// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
at::Tensor to_sparse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {

    static auto op = create_to_sparse_typed_handle();
    return op.redispatch(dispatchKeySet, self, layout, blocksize, dense_dim);
}
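
// The default to_sparse overload selects the target layout via optionals
// rather than via separate operators. A sketch requesting CSR (illustrative
// only, not generated; assumes the at::kSparseCsr alias from
// c10/core/Layout.h):
[[maybe_unused]] static at::Tensor example_to_sparse_csr(const at::Tensor & dense) {
  // blocksize stays disengaged: it applies only to the blocked BSR/BSC layouts.
  return to_sparse::call(dense, at::kSparseCsr, c10::nullopt, c10::nullopt);
}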

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn, name, "aten::to_mkldnn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn, schema_str, "to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor")

// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_mkldnn::schema> create_to_mkldnn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_mkldnn::name, to_mkldnn::overload_name)
      .typed<to_mkldnn::schema>();
}

// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
at::Tensor to_mkldnn::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {

    static auto op = create_to_mkldnn_typed_handle();
    return op.call(self, dtype);
}

// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
at::Tensor to_mkldnn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype) {

    static auto op = create_to_mkldnn_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn_backward, name, "aten::to_mkldnn_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn_backward, schema_str, "to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor")

// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_mkldnn_backward::schema> create_to_mkldnn_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_mkldnn_backward::name, to_mkldnn_backward::overload_name)
      .typed<to_mkldnn_backward::schema>();
}

// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
at::Tensor to_mkldnn_backward::call(const at::Tensor & grad, const at::Tensor & input) {

    static auto op = create_to_mkldnn_backward_typed_handle();
    return op.call(grad, input);
}

// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
at::Tensor to_mkldnn_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input) {

    static auto op = create_to_mkldnn_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(int_repr, name, "aten::int_repr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(int_repr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(int_repr, schema_str, "int_repr(Tensor self) -> Tensor")

// aten::int_repr(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<int_repr::schema> create_int_repr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(int_repr::name, int_repr::overload_name)
      .typed<int_repr::schema>();
}

// aten::int_repr(Tensor self) -> Tensor
at::Tensor int_repr::call(const at::Tensor & self) {

    static auto op = create_int_repr_typed_handle();
    return op.call(self);
}

// aten::int_repr(Tensor self) -> Tensor
at::Tensor int_repr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_int_repr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qscheme, name, "aten::qscheme")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qscheme, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qscheme, schema_str, "qscheme(Tensor self) -> QScheme")

// aten::qscheme(Tensor self) -> QScheme
static C10_NOINLINE c10::TypedOperatorHandle<qscheme::schema> create_qscheme_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(qscheme::name, qscheme::overload_name)
      .typed<qscheme::schema>();
}

// aten::qscheme(Tensor self) -> QScheme
at::QScheme qscheme::call(const at::Tensor & self) {

    static auto op = create_qscheme_typed_handle();
    return op.call(self);
}

// aten::qscheme(Tensor self) -> QScheme
at::QScheme qscheme::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_qscheme_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
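
// Not every operator returns tensors: qscheme yields a c10::QScheme enum
// value straight through the dispatcher. Sketch (illustrative only, not
// generated; assumes the at::kPerTensorAffine alias from
// c10/core/QScheme.h):
[[maybe_unused]] static bool example_is_per_tensor_affine(const at::Tensor & q) {
  // Compares the dispatched enum result against one known scheme.
  return qscheme::call(q) == at::kPerTensorAffine;
}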

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine, name, "aten::fake_quantize_per_channel_affine")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine, schema_str, "fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor")

// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_channel_affine::schema> create_fake_quantize_per_channel_affine_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_channel_affine::name, fake_quantize_per_channel_affine::overload_name)
      .typed<fake_quantize_per_channel_affine::schema>();
}

// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_channel_affine::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {

    static auto op = create_fake_quantize_per_channel_affine_typed_handle();
    return op.call(self, scale, zero_point, axis, quant_min, quant_max);
}

// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_channel_affine::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {

    static auto op = create_fake_quantize_per_channel_affine_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask, name, "aten::fake_quantize_per_channel_affine_cachemask")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask, schema_str, "fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)")

// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_channel_affine_cachemask::schema> create_fake_quantize_per_channel_affine_cachemask_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_channel_affine_cachemask::name, fake_quantize_per_channel_affine_cachemask::overload_name)
      .typed<fake_quantize_per_channel_affine_cachemask::schema>();
}

// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {

    static auto op = create_fake_quantize_per_channel_affine_cachemask_typed_handle();
    return op.call(self, scale, zero_point, axis, quant_min, quant_max);
}

// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {

    static auto op = create_fake_quantize_per_channel_affine_cachemask_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max);
}
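
// The cachemask variant returns the fake-quantized output together with the
// boolean mask the backward pass reuses, packaged as a two-tensor tuple.
// Sketch (illustrative only, not generated; the uint8 quant range is made
// up, and <tuple> is assumed to be available transitively):
[[maybe_unused]] static at::Tensor example_fake_quant_cachemask(
    const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point) {
  at::Tensor out, mask;
  // Unpack the (output, mask) tuple; only the output is used here.
  std::tie(out, mask) = fake_quantize_per_channel_affine_cachemask::call(
      self, scale, zero_point, /*axis=*/0, /*quant_min=*/0, /*quant_max=*/255);
  return out;
}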

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper, name, "aten::_fused_moving_avg_obs_fq_helper")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper, schema_str, "_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)")

// aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_moving_avg_obs_fq_helper::schema> create__fused_moving_avg_obs_fq_helper_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_moving_avg_obs_fq_helper::name, _fused_moving_avg_obs_fq_helper::overload_name)
      .typed<_fused_moving_avg_obs_fq_helper::schema>();
}

// aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper::call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {

    static auto op = create__fused_moving_avg_obs_fq_helper_typed_handle();
    return op.call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {

    static auto op = create__fused_moving_avg_obs_fq_helper_typed_handle();
    return op.redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_copy, name, "aten::_to_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_copy, schema_str, "_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor")

// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_to_copy::schema> create__to_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_copy::name, _to_copy::overload_name)
      .typed<_to_copy::schema>();
}

// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
at::Tensor _to_copy::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create__to_copy_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
}

// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
at::Tensor _to_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create__to_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, non_blocking, memory_format);
}
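
// _to_copy funnels Tensor.to(...)-style conversions through one schema
// whose disengaged optionals mean "inherit from self". A sketch that pins
// down only dtype (illustrative only, not generated; at::kHalf is the
// c10::ScalarType::Half alias):
[[maybe_unused]] static at::Tensor example_to_copy_half(const at::Tensor & self) {
  // Layout, device, pin_memory, and memory_format are all inherited.
  return _to_copy::call(self, at::kHalf, c10::nullopt, c10::nullopt,
                        c10::nullopt, /*non_blocking=*/false, c10::nullopt);
}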

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_differentiable_lstm_cell_backward, name, "aten::_thnn_differentiable_lstm_cell_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_differentiable_lstm_cell_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_differentiable_lstm_cell_backward, schema_str, "_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")

// aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_differentiable_lstm_cell_backward::schema> create__thnn_differentiable_lstm_cell_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_thnn_differentiable_lstm_cell_backward::name, _thnn_differentiable_lstm_cell_backward::overload_name)
      .typed<_thnn_differentiable_lstm_cell_backward::schema>();
}

// aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward::call(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {

    static auto op = create__thnn_differentiable_lstm_cell_backward_typed_handle();
    return op.call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
}

// aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {

    static auto op = create__thnn_differentiable_lstm_cell_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_differentiable_gru_cell_backward, name, "aten::_thnn_differentiable_gru_cell_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_differentiable_gru_cell_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_differentiable_gru_cell_backward, schema_str, "_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")

// aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_differentiable_gru_cell_backward::schema> create__thnn_differentiable_gru_cell_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_thnn_differentiable_gru_cell_backward::name, _thnn_differentiable_gru_cell_backward::overload_name)
      .typed<_thnn_differentiable_gru_cell_backward::schema>();
}

// aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward::call(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {

    static auto op = create__thnn_differentiable_gru_cell_backward_typed_handle();
    return op.call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}

// aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {

    static auto op = create__thnn_differentiable_gru_cell_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_cell, name, "aten::rnn_tanh_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_cell, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_cell, schema_str, "rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor")

// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rnn_tanh_cell::schema> create_rnn_tanh_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rnn_tanh_cell::name, rnn_tanh_cell::overload_name)
      .typed<rnn_tanh_cell::schema>();
}

// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor rnn_tanh_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {

    static auto op = create_rnn_tanh_cell_typed_handle();
    return op.call(input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor rnn_tanh_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {

    static auto op = create_rnn_tanh_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_gru_cell, name, "aten::quantized_gru_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_gru_cell, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_gru_cell, schema_str, "quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor")

// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_gru_cell::schema> create_quantized_gru_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_gru_cell::name, quantized_gru_cell::overload_name)
      .typed<quantized_gru_cell::schema>();
}

// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_gru_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {

    static auto op = create_quantized_gru_cell_typed_handle();
    return op.call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_gru_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {

    static auto op = create_quantized_gru_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence_backward, name, "aten::_pack_padded_sequence_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence_backward, schema_str, "_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor")

// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pack_padded_sequence_backward::schema> create__pack_padded_sequence_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pack_padded_sequence_backward::name, _pack_padded_sequence_backward::overload_name)
      .typed<_pack_padded_sequence_backward::schema>();
}

// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
at::Tensor _pack_padded_sequence_backward::call(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {

    static auto op = create__pack_padded_sequence_backward_typed_handle();
    return op.call(grad, input_size, batch_sizes, batch_first);
}

// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
at::Tensor _pack_padded_sequence_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {

    static auto op = create__pack_padded_sequence_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input_size, batch_sizes, batch_first);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift, name, "aten::lift")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift, schema_str, "lift(Tensor self) -> Tensor")

// aten::lift(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lift::schema> create_lift_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lift::name, lift::overload_name)
      .typed<lift::schema>();
}

// aten::lift(Tensor self) -> Tensor
at::Tensor lift::call(const at::Tensor & self) {

    static auto op = create_lift_typed_handle();
    return op.call(self);
}

// aten::lift(Tensor self) -> Tensor
at::Tensor lift::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_lift_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh, name, "aten::lift_fresh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh, schema_str, "lift_fresh(Tensor(a) self) -> Tensor(a)")

// aten::lift_fresh(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<lift_fresh::schema> create_lift_fresh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lift_fresh::name, lift_fresh::overload_name)
      .typed<lift_fresh::schema>();
}

// aten::lift_fresh(Tensor(a) self) -> Tensor(a)
at::Tensor lift_fresh::call(const at::Tensor & self) {

    static auto op = create_lift_fresh_typed_handle();
    return op.call(self);
}

// aten::lift_fresh(Tensor(a) self) -> Tensor(a)
at::Tensor lift_fresh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_lift_fresh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq__Scalar, name, "aten::eq_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq__Scalar, schema_str, "eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eq__Scalar::schema> create_eq__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq__Scalar::name, eq__Scalar::overload_name)
      .typed<eq__Scalar::schema>();
}

// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & eq__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_eq__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & eq__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_eq__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq__Tensor, name, "aten::eq_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq__Tensor, schema_str, "eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eq__Tensor::schema> create_eq__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq__Tensor::name, eq__Tensor::overload_name)
      .typed<eq__Tensor::schema>();
}

// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & eq__Tensor::call(at::Tensor & self, const at::Tensor & other) {
7838
7839 static auto op = create_eq__Tensor_typed_handle();
7840 return op.call(self, other);
7841}
7842
7843// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
7844at::Tensor & eq__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
7845
7846 static auto op = create_eq__Tensor_typed_handle();
7847 return op.redispatch(dispatchKeySet, self, other);
7848}
7849
7850STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Tensor_out, name, "aten::bitwise_and")
7851STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Tensor_out, overload_name, "Tensor_out")
7852STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Tensor_out, schema_str, "bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
7853
7854// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
7855static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Tensor_out::schema> create_bitwise_and_Tensor_out_typed_handle() {
7856 return c10::Dispatcher::singleton()
7857 .findSchemaOrThrow(bitwise_and_Tensor_out::name, bitwise_and_Tensor_out::overload_name)
7858 .typed<bitwise_and_Tensor_out::schema>();
7859}
7860
7861// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
7862at::Tensor & bitwise_and_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
7863
7864 static auto op = create_bitwise_and_Tensor_out_typed_handle();
7865 return op.call(self, other, out);
7866}
7867
7868// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
7869at::Tensor & bitwise_and_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
7870
7871 static auto op = create_bitwise_and_Tensor_out_typed_handle();
7872 return op.redispatch(dispatchKeySet, self, other, out);
7873}
7874
7875STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_out, name, "aten::bitwise_and")
7876STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_out, overload_name, "Scalar_out")
7877STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_out, schema_str, "bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
7878
7879// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
7880static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Scalar_out::schema> create_bitwise_and_Scalar_out_typed_handle() {
7881 return c10::Dispatcher::singleton()
7882 .findSchemaOrThrow(bitwise_and_Scalar_out::name, bitwise_and_Scalar_out::overload_name)
7883 .typed<bitwise_and_Scalar_out::schema>();
7884}
7885
7886// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
7887at::Tensor & bitwise_and_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
7888
7889 static auto op = create_bitwise_and_Scalar_out_typed_handle();
7890 return op.call(self, other, out);
7891}
7892
7893// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
7894at::Tensor & bitwise_and_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
7895
7896 static auto op = create_bitwise_and_Scalar_out_typed_handle();
7897 return op.redispatch(dispatchKeySet, self, other, out);
7898}
7899
7900STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar, name, "aten::bitwise_and")
7901STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar, overload_name, "Scalar")
7902STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar, schema_str, "bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor")
7903
7904// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
7905static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Scalar::schema> create_bitwise_and_Scalar_typed_handle() {
7906 return c10::Dispatcher::singleton()
7907 .findSchemaOrThrow(bitwise_and_Scalar::name, bitwise_and_Scalar::overload_name)
7908 .typed<bitwise_and_Scalar::schema>();
7909}
7910
7911// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
7912at::Tensor bitwise_and_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
7913
7914 static auto op = create_bitwise_and_Scalar_typed_handle();
7915 return op.call(self, other);
7916}
7917
7918// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
7919at::Tensor bitwise_and_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
7920
7921 static auto op = create_bitwise_and_Scalar_typed_handle();
7922 return op.redispatch(dispatchKeySet, self, other);
7923}
7924
7925STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_Tensor, name, "aten::bitwise_and")
7926STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_Tensor, overload_name, "Scalar_Tensor")
7927STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_Tensor, schema_str, "bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")
7928
7929// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
7930static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Scalar_Tensor::schema> create_bitwise_and_Scalar_Tensor_typed_handle() {
7931 return c10::Dispatcher::singleton()
7932 .findSchemaOrThrow(bitwise_and_Scalar_Tensor::name, bitwise_and_Scalar_Tensor::overload_name)
7933 .typed<bitwise_and_Scalar_Tensor::schema>();
7934}
7935
7936// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
7937at::Tensor bitwise_and_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
7938
7939 static auto op = create_bitwise_and_Scalar_Tensor_typed_handle();
7940 return op.call(self, other);
7941}
7942
7943// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
7944at::Tensor bitwise_and_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
7945
7946 static auto op = create_bitwise_and_Scalar_Tensor_typed_handle();
7947 return op.redispatch(dispatchKeySet, self, other);
7948}
7949
7950STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Tensor, name, "aten::bitwise_and")
7951STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Tensor, overload_name, "Tensor")
7952STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Tensor, schema_str, "bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor")
7953
7954// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
7955static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Tensor::schema> create_bitwise_and_Tensor_typed_handle() {
7956 return c10::Dispatcher::singleton()
7957 .findSchemaOrThrow(bitwise_and_Tensor::name, bitwise_and_Tensor::overload_name)
7958 .typed<bitwise_and_Tensor::schema>();
7959}
7960
7961// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
7962at::Tensor bitwise_and_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
7963
7964 static auto op = create_bitwise_and_Tensor_typed_handle();
7965 return op.call(self, other);
7966}
7967
7968// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
7969at::Tensor bitwise_and_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
7970
7971 static auto op = create_bitwise_and_Tensor_typed_handle();
7972 return op.redispatch(dispatchKeySet, self, other);
7973}
7974
7975STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and__Scalar, name, "aten::bitwise_and_")
7976STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and__Scalar, overload_name, "Scalar")
7977STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and__Scalar, schema_str, "bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
7978
7979// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
7980static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and__Scalar::schema> create_bitwise_and__Scalar_typed_handle() {
7981 return c10::Dispatcher::singleton()
7982 .findSchemaOrThrow(bitwise_and__Scalar::name, bitwise_and__Scalar::overload_name)
7983 .typed<bitwise_and__Scalar::schema>();
7984}
7985
7986// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
7987at::Tensor & bitwise_and__Scalar::call(at::Tensor & self, const at::Scalar & other) {
7988
7989 static auto op = create_bitwise_and__Scalar_typed_handle();
7990 return op.call(self, other);
7991}
7992
7993// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
7994at::Tensor & bitwise_and__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
7995
7996 static auto op = create_bitwise_and__Scalar_typed_handle();
7997 return op.redispatch(dispatchKeySet, self, other);
7998}
7999
8000STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and__Tensor, name, "aten::bitwise_and_")
8001STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and__Tensor, overload_name, "Tensor")
8002STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and__Tensor, schema_str, "bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
8003
8004// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8005static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and__Tensor::schema> create_bitwise_and__Tensor_typed_handle() {
8006 return c10::Dispatcher::singleton()
8007 .findSchemaOrThrow(bitwise_and__Tensor::name, bitwise_and__Tensor::overload_name)
8008 .typed<bitwise_and__Tensor::schema>();
8009}
8010
8011// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8012at::Tensor & bitwise_and__Tensor::call(at::Tensor & self, const at::Tensor & other) {
8013
8014 static auto op = create_bitwise_and__Tensor_typed_handle();
8015 return op.call(self, other);
8016}
8017
8018// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8019at::Tensor & bitwise_and__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
8020
8021 static auto op = create_bitwise_and__Tensor_typed_handle();
8022 return op.redispatch(dispatchKeySet, self, other);
8023}
8024
8025STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__or___Scalar, name, "aten::__or__")
8026STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__or___Scalar, overload_name, "Scalar")
8027STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__or___Scalar, schema_str, "__or__.Scalar(Tensor self, Scalar other) -> Tensor")
8028
8029// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
8030static C10_NOINLINE c10::TypedOperatorHandle<__or___Scalar::schema> create___or___Scalar_typed_handle() {
8031 return c10::Dispatcher::singleton()
8032 .findSchemaOrThrow(__or___Scalar::name, __or___Scalar::overload_name)
8033 .typed<__or___Scalar::schema>();
8034}
8035
8036// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
8037at::Tensor __or___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
8038
8039 static auto op = create___or___Scalar_typed_handle();
8040 return op.call(self, other);
8041}
8042
8043// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
8044at::Tensor __or___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
8045
8046 static auto op = create___or___Scalar_typed_handle();
8047 return op.redispatch(dispatchKeySet, self, other);
8048}
8049
8050STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__or___Tensor, name, "aten::__or__")
8051STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__or___Tensor, overload_name, "Tensor")
8052STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__or___Tensor, schema_str, "__or__.Tensor(Tensor self, Tensor other) -> Tensor")
8053
8054// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
8055static C10_NOINLINE c10::TypedOperatorHandle<__or___Tensor::schema> create___or___Tensor_typed_handle() {
8056 return c10::Dispatcher::singleton()
8057 .findSchemaOrThrow(__or___Tensor::name, __or___Tensor::overload_name)
8058 .typed<__or___Tensor::schema>();
8059}
8060
8061// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
8062at::Tensor __or___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
8063
8064 static auto op = create___or___Tensor_typed_handle();
8065 return op.call(self, other);
8066}
8067
8068// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
8069at::Tensor __or___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
8070
8071 static auto op = create___or___Tensor_typed_handle();
8072 return op.redispatch(dispatchKeySet, self, other);
8073}
8074
8075STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ior___Scalar, name, "aten::__ior__")
8076STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ior___Scalar, overload_name, "Scalar")
8077STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ior___Scalar, schema_str, "__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
8078
8079// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8080static C10_NOINLINE c10::TypedOperatorHandle<__ior___Scalar::schema> create___ior___Scalar_typed_handle() {
8081 return c10::Dispatcher::singleton()
8082 .findSchemaOrThrow(__ior___Scalar::name, __ior___Scalar::overload_name)
8083 .typed<__ior___Scalar::schema>();
8084}
8085
8086// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8087at::Tensor & __ior___Scalar::call(at::Tensor & self, const at::Scalar & other) {
8088
8089 static auto op = create___ior___Scalar_typed_handle();
8090 return op.call(self, other);
8091}
8092
8093// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8094at::Tensor & __ior___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
8095
8096 static auto op = create___ior___Scalar_typed_handle();
8097 return op.redispatch(dispatchKeySet, self, other);
8098}
8099
8100STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ior___Tensor, name, "aten::__ior__")
8101STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ior___Tensor, overload_name, "Tensor")
8102STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ior___Tensor, schema_str, "__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
8103
8104// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8105static C10_NOINLINE c10::TypedOperatorHandle<__ior___Tensor::schema> create___ior___Tensor_typed_handle() {
8106 return c10::Dispatcher::singleton()
8107 .findSchemaOrThrow(__ior___Tensor::name, __ior___Tensor::overload_name)
8108 .typed<__ior___Tensor::schema>();
8109}
8110
8111// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8112at::Tensor & __ior___Tensor::call(at::Tensor & self, const at::Tensor & other) {
8113
8114 static auto op = create___ior___Tensor_typed_handle();
8115 return op.call(self, other);
8116}
8117
8118// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8119at::Tensor & __ior___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
8120
8121 static auto op = create___ior___Tensor_typed_handle();
8122 return op.redispatch(dispatchKeySet, self, other);
8123}
8124
8125STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Tensor_out, name, "aten::bitwise_xor")
8126STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Tensor_out, overload_name, "Tensor_out")
8127STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Tensor_out, schema_str, "bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
8128
8129// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8130static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Tensor_out::schema> create_bitwise_xor_Tensor_out_typed_handle() {
8131 return c10::Dispatcher::singleton()
8132 .findSchemaOrThrow(bitwise_xor_Tensor_out::name, bitwise_xor_Tensor_out::overload_name)
8133 .typed<bitwise_xor_Tensor_out::schema>();
8134}
8135
8136// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8137at::Tensor & bitwise_xor_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
8138
8139 static auto op = create_bitwise_xor_Tensor_out_typed_handle();
8140 return op.call(self, other, out);
8141}
8142
8143// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8144at::Tensor & bitwise_xor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
8145
8146 static auto op = create_bitwise_xor_Tensor_out_typed_handle();
8147 return op.redispatch(dispatchKeySet, self, other, out);
8148}
8149
8150STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_out, name, "aten::bitwise_xor")
8151STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_out, overload_name, "Scalar_out")
8152STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_out, schema_str, "bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
8153
8154// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
8155static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Scalar_out::schema> create_bitwise_xor_Scalar_out_typed_handle() {
8156 return c10::Dispatcher::singleton()
8157 .findSchemaOrThrow(bitwise_xor_Scalar_out::name, bitwise_xor_Scalar_out::overload_name)
8158 .typed<bitwise_xor_Scalar_out::schema>();
8159}
8160
8161// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
8162at::Tensor & bitwise_xor_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
8163
8164 static auto op = create_bitwise_xor_Scalar_out_typed_handle();
8165 return op.call(self, other, out);
8166}
8167
8168// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
8169at::Tensor & bitwise_xor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
8170
8171 static auto op = create_bitwise_xor_Scalar_out_typed_handle();
8172 return op.redispatch(dispatchKeySet, self, other, out);
8173}
8174
8175STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar, name, "aten::bitwise_xor")
8176STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar, overload_name, "Scalar")
8177STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar, schema_str, "bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor")
8178
8179// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
8180static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Scalar::schema> create_bitwise_xor_Scalar_typed_handle() {
8181 return c10::Dispatcher::singleton()
8182 .findSchemaOrThrow(bitwise_xor_Scalar::name, bitwise_xor_Scalar::overload_name)
8183 .typed<bitwise_xor_Scalar::schema>();
8184}
8185
8186// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
8187at::Tensor bitwise_xor_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
8188
8189 static auto op = create_bitwise_xor_Scalar_typed_handle();
8190 return op.call(self, other);
8191}
8192
8193// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
8194at::Tensor bitwise_xor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
8195
8196 static auto op = create_bitwise_xor_Scalar_typed_handle();
8197 return op.redispatch(dispatchKeySet, self, other);
8198}
8199
8200STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_Tensor, name, "aten::bitwise_xor")
8201STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_Tensor, overload_name, "Scalar_Tensor")
8202STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_Tensor, schema_str, "bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")
8203
8204// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
8205static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Scalar_Tensor::schema> create_bitwise_xor_Scalar_Tensor_typed_handle() {
8206 return c10::Dispatcher::singleton()
8207 .findSchemaOrThrow(bitwise_xor_Scalar_Tensor::name, bitwise_xor_Scalar_Tensor::overload_name)
8208 .typed<bitwise_xor_Scalar_Tensor::schema>();
8209}
8210
8211// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
8212at::Tensor bitwise_xor_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
8213
8214 static auto op = create_bitwise_xor_Scalar_Tensor_typed_handle();
8215 return op.call(self, other);
8216}
8217
8218// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
8219at::Tensor bitwise_xor_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
8220
8221 static auto op = create_bitwise_xor_Scalar_Tensor_typed_handle();
8222 return op.redispatch(dispatchKeySet, self, other);
8223}
8224
8225STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Tensor, name, "aten::bitwise_xor")
8226STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Tensor, overload_name, "Tensor")
8227STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Tensor, schema_str, "bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor")
8228
8229// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
8230static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Tensor::schema> create_bitwise_xor_Tensor_typed_handle() {
8231 return c10::Dispatcher::singleton()
8232 .findSchemaOrThrow(bitwise_xor_Tensor::name, bitwise_xor_Tensor::overload_name)
8233 .typed<bitwise_xor_Tensor::schema>();
8234}
8235
8236// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
8237at::Tensor bitwise_xor_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
8238
8239 static auto op = create_bitwise_xor_Tensor_typed_handle();
8240 return op.call(self, other);
8241}
8242
8243// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
8244at::Tensor bitwise_xor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
8245
8246 static auto op = create_bitwise_xor_Tensor_typed_handle();
8247 return op.redispatch(dispatchKeySet, self, other);
8248}
8249
8250STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor__Scalar, name, "aten::bitwise_xor_")
8251STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor__Scalar, overload_name, "Scalar")
8252STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor__Scalar, schema_str, "bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
8253
8254// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8255static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor__Scalar::schema> create_bitwise_xor__Scalar_typed_handle() {
8256 return c10::Dispatcher::singleton()
8257 .findSchemaOrThrow(bitwise_xor__Scalar::name, bitwise_xor__Scalar::overload_name)
8258 .typed<bitwise_xor__Scalar::schema>();
8259}
8260
8261// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8262at::Tensor & bitwise_xor__Scalar::call(at::Tensor & self, const at::Scalar & other) {
8263
8264 static auto op = create_bitwise_xor__Scalar_typed_handle();
8265 return op.call(self, other);
8266}
8267
8268// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8269at::Tensor & bitwise_xor__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
8270
8271 static auto op = create_bitwise_xor__Scalar_typed_handle();
8272 return op.redispatch(dispatchKeySet, self, other);
8273}
8274
8275STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor__Tensor, name, "aten::bitwise_xor_")
8276STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor__Tensor, overload_name, "Tensor")
8277STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor__Tensor, schema_str, "bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
8278
8279// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8280static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor__Tensor::schema> create_bitwise_xor__Tensor_typed_handle() {
8281 return c10::Dispatcher::singleton()
8282 .findSchemaOrThrow(bitwise_xor__Tensor::name, bitwise_xor__Tensor::overload_name)
8283 .typed<bitwise_xor__Tensor::schema>();
8284}
8285
8286// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8287at::Tensor & bitwise_xor__Tensor::call(at::Tensor & self, const at::Tensor & other) {
8288
8289 static auto op = create_bitwise_xor__Tensor_typed_handle();
8290 return op.call(self, other);
8291}
8292
8293// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8294at::Tensor & bitwise_xor__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
8295
8296 static auto op = create_bitwise_xor__Tensor_typed_handle();
8297 return op.redispatch(dispatchKeySet, self, other);
8298}
8299
8300STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Scalar, name, "aten::__lshift__")
8301STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Scalar, overload_name, "Scalar")
8302STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Scalar, schema_str, "__lshift__.Scalar(Tensor self, Scalar other) -> Tensor")
8303
8304// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
8305static C10_NOINLINE c10::TypedOperatorHandle<__lshift___Scalar::schema> create___lshift___Scalar_typed_handle() {
8306 return c10::Dispatcher::singleton()
8307 .findSchemaOrThrow(__lshift___Scalar::name, __lshift___Scalar::overload_name)
8308 .typed<__lshift___Scalar::schema>();
8309}
8310
8311// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
8312at::Tensor __lshift___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
8313
8314 static auto op = create___lshift___Scalar_typed_handle();
8315 return op.call(self, other);
8316}
8317
8318// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
8319at::Tensor __lshift___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
8320
8321 static auto op = create___lshift___Scalar_typed_handle();
8322 return op.redispatch(dispatchKeySet, self, other);
8323}
8324
8325STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Tensor, name, "aten::__lshift__")
8326STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Tensor, overload_name, "Tensor")
8327STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Tensor, schema_str, "__lshift__.Tensor(Tensor self, Tensor other) -> Tensor")
8328
8329// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
8330static C10_NOINLINE c10::TypedOperatorHandle<__lshift___Tensor::schema> create___lshift___Tensor_typed_handle() {
8331 return c10::Dispatcher::singleton()
8332 .findSchemaOrThrow(__lshift___Tensor::name, __lshift___Tensor::overload_name)
8333 .typed<__lshift___Tensor::schema>();
8334}
8335
8336// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
8337at::Tensor __lshift___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
8338
8339 static auto op = create___lshift___Tensor_typed_handle();
8340 return op.call(self, other);
8341}
8342
8343// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
8344at::Tensor __lshift___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
8345
8346 static auto op = create___lshift___Tensor_typed_handle();
8347 return op.redispatch(dispatchKeySet, self, other);
8348}
8349
8350STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ilshift___Scalar, name, "aten::__ilshift__")
8351STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ilshift___Scalar, overload_name, "Scalar")
8352STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ilshift___Scalar, schema_str, "__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
8353
8354// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8355static C10_NOINLINE c10::TypedOperatorHandle<__ilshift___Scalar::schema> create___ilshift___Scalar_typed_handle() {
8356 return c10::Dispatcher::singleton()
8357 .findSchemaOrThrow(__ilshift___Scalar::name, __ilshift___Scalar::overload_name)
8358 .typed<__ilshift___Scalar::schema>();
8359}
8360
8361// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8362at::Tensor & __ilshift___Scalar::call(at::Tensor & self, const at::Scalar & other) {
8363
8364 static auto op = create___ilshift___Scalar_typed_handle();
8365 return op.call(self, other);
8366}
8367
8368// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8369at::Tensor & __ilshift___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
8370
8371 static auto op = create___ilshift___Scalar_typed_handle();
8372 return op.redispatch(dispatchKeySet, self, other);
8373}
8374
8375STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ilshift___Tensor, name, "aten::__ilshift__")
8376STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ilshift___Tensor, overload_name, "Tensor")
8377STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ilshift___Tensor, schema_str, "__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
8378
8379// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8380static C10_NOINLINE c10::TypedOperatorHandle<__ilshift___Tensor::schema> create___ilshift___Tensor_typed_handle() {
8381 return c10::Dispatcher::singleton()
8382 .findSchemaOrThrow(__ilshift___Tensor::name, __ilshift___Tensor::overload_name)
8383 .typed<__ilshift___Tensor::schema>();
8384}
8385
8386// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8387at::Tensor & __ilshift___Tensor::call(at::Tensor & self, const at::Tensor & other) {
8388
8389 static auto op = create___ilshift___Tensor_typed_handle();
8390 return op.call(self, other);
8391}
8392
8393// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8394at::Tensor & __ilshift___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
8395
8396 static auto op = create___ilshift___Tensor_typed_handle();
8397 return op.redispatch(dispatchKeySet, self, other);
8398}
8399
8400STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor, name, "aten::bitwise_left_shift")
8401STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor, overload_name, "Tensor")
8402STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor, schema_str, "bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor")
8403
8404// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
8405static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Tensor::schema> create_bitwise_left_shift_Tensor_typed_handle() {
8406 return c10::Dispatcher::singleton()
8407 .findSchemaOrThrow(bitwise_left_shift_Tensor::name, bitwise_left_shift_Tensor::overload_name)
8408 .typed<bitwise_left_shift_Tensor::schema>();
8409}
8410
8411// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
8412at::Tensor bitwise_left_shift_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
8413
8414 static auto op = create_bitwise_left_shift_Tensor_typed_handle();
8415 return op.call(self, other);
8416}
8417
8418// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
8419at::Tensor bitwise_left_shift_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
8420
8421 static auto op = create_bitwise_left_shift_Tensor_typed_handle();
8422 return op.redispatch(dispatchKeySet, self, other);
8423}
8424
8425STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift__Tensor, name, "aten::bitwise_left_shift_")
8426STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift__Tensor, overload_name, "Tensor")
8427STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift__Tensor, schema_str, "bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
8428
8429// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8430static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift__Tensor::schema> create_bitwise_left_shift__Tensor_typed_handle() {
8431 return c10::Dispatcher::singleton()
8432 .findSchemaOrThrow(bitwise_left_shift__Tensor::name, bitwise_left_shift__Tensor::overload_name)
8433 .typed<bitwise_left_shift__Tensor::schema>();
8434}
8435
8436// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8437at::Tensor & bitwise_left_shift__Tensor::call(at::Tensor & self, const at::Tensor & other) {
8438
8439 static auto op = create_bitwise_left_shift__Tensor_typed_handle();
8440 return op.call(self, other);
8441}
8442
8443// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8444at::Tensor & bitwise_left_shift__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
8445
8446 static auto op = create_bitwise_left_shift__Tensor_typed_handle();
8447 return op.redispatch(dispatchKeySet, self, other);
8448}
8449
8450STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_out, name, "aten::bitwise_left_shift")
8451STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_out, overload_name, "Tensor_out")
8452STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_out, schema_str, "bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
8453
8454// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8455static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Tensor_out::schema> create_bitwise_left_shift_Tensor_out_typed_handle() {
8456 return c10::Dispatcher::singleton()
8457 .findSchemaOrThrow(bitwise_left_shift_Tensor_out::name, bitwise_left_shift_Tensor_out::overload_name)
8458 .typed<bitwise_left_shift_Tensor_out::schema>();
8459}
8460
8461// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8462at::Tensor & bitwise_left_shift_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
8463
8464 static auto op = create_bitwise_left_shift_Tensor_out_typed_handle();
8465 return op.call(self, other, out);
8466}
8467
8468// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8469at::Tensor & bitwise_left_shift_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
8470
8471 static auto op = create_bitwise_left_shift_Tensor_out_typed_handle();
8472 return op.redispatch(dispatchKeySet, self, other, out);
8473}
8474
8475STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_Scalar, name, "aten::bitwise_left_shift")
8476STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_Scalar, overload_name, "Tensor_Scalar")
8477STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_Scalar, schema_str, "bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor")
8478
8479// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
8480static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Tensor_Scalar::schema> create_bitwise_left_shift_Tensor_Scalar_typed_handle() {
8481 return c10::Dispatcher::singleton()
8482 .findSchemaOrThrow(bitwise_left_shift_Tensor_Scalar::name, bitwise_left_shift_Tensor_Scalar::overload_name)
8483 .typed<bitwise_left_shift_Tensor_Scalar::schema>();
8484}
8485
8486// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
8487at::Tensor bitwise_left_shift_Tensor_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
8488
8489 static auto op = create_bitwise_left_shift_Tensor_Scalar_typed_handle();
8490 return op.call(self, other);
8491}
8492
8493// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
8494at::Tensor bitwise_left_shift_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
8495
8496 static auto op = create_bitwise_left_shift_Tensor_Scalar_typed_handle();
8497 return op.redispatch(dispatchKeySet, self, other);
8498}
8499
8500STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift__Tensor_Scalar, name, "aten::bitwise_left_shift_")
8501STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift__Tensor_Scalar, overload_name, "Tensor_Scalar")
8502STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift__Tensor_Scalar, schema_str, "bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
8503
8504// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8505static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift__Tensor_Scalar::schema> create_bitwise_left_shift__Tensor_Scalar_typed_handle() {
8506 return c10::Dispatcher::singleton()
8507 .findSchemaOrThrow(bitwise_left_shift__Tensor_Scalar::name, bitwise_left_shift__Tensor_Scalar::overload_name)
8508 .typed<bitwise_left_shift__Tensor_Scalar::schema>();
8509}
8510
8511// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8512at::Tensor & bitwise_left_shift__Tensor_Scalar::call(at::Tensor & self, const at::Scalar & other) {
8513
8514 static auto op = create_bitwise_left_shift__Tensor_Scalar_typed_handle();
8515 return op.call(self, other);
8516}
8517
8518// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8519at::Tensor & bitwise_left_shift__Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
8520
8521 static auto op = create_bitwise_left_shift__Tensor_Scalar_typed_handle();
8522 return op.redispatch(dispatchKeySet, self, other);
8523}
8524
8525STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_Scalar_out, name, "aten::bitwise_left_shift")
8526STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_Scalar_out, overload_name, "Tensor_Scalar_out")
8527STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Tensor_Scalar_out, schema_str, "bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
8528
8529// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
8530static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Tensor_Scalar_out::schema> create_bitwise_left_shift_Tensor_Scalar_out_typed_handle() {
8531 return c10::Dispatcher::singleton()
8532 .findSchemaOrThrow(bitwise_left_shift_Tensor_Scalar_out::name, bitwise_left_shift_Tensor_Scalar_out::overload_name)
8533 .typed<bitwise_left_shift_Tensor_Scalar_out::schema>();
8534}
8535
8536// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
8537at::Tensor & bitwise_left_shift_Tensor_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
8538
8539 static auto op = create_bitwise_left_shift_Tensor_Scalar_out_typed_handle();
8540 return op.call(self, other, out);
8541}
8542
8543// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
8544at::Tensor & bitwise_left_shift_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
8545
8546 static auto op = create_bitwise_left_shift_Tensor_Scalar_out_typed_handle();
8547 return op.redispatch(dispatchKeySet, self, other, out);
8548}
8549
8550STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Scalar_Tensor, name, "aten::bitwise_left_shift")
8551STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Scalar_Tensor, overload_name, "Scalar_Tensor")
8552STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Scalar_Tensor, schema_str, "bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")
8553
8554// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
8555static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Scalar_Tensor::schema> create_bitwise_left_shift_Scalar_Tensor_typed_handle() {
8556 return c10::Dispatcher::singleton()
8557 .findSchemaOrThrow(bitwise_left_shift_Scalar_Tensor::name, bitwise_left_shift_Scalar_Tensor::overload_name)
8558 .typed<bitwise_left_shift_Scalar_Tensor::schema>();
8559}
8560
8561// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
8562at::Tensor bitwise_left_shift_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
8563
8564 static auto op = create_bitwise_left_shift_Scalar_Tensor_typed_handle();
8565 return op.call(self, other);
8566}
8567
8568// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
8569at::Tensor bitwise_left_shift_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
8570
8571 static auto op = create_bitwise_left_shift_Scalar_Tensor_typed_handle();
8572 return op.redispatch(dispatchKeySet, self, other);
8573}
8574
8575STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Scalar, name, "aten::__rshift__")
8576STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Scalar, overload_name, "Scalar")
8577STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Scalar, schema_str, "__rshift__.Scalar(Tensor self, Scalar other) -> Tensor")
8578
8579// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
8580static C10_NOINLINE c10::TypedOperatorHandle<__rshift___Scalar::schema> create___rshift___Scalar_typed_handle() {
8581 return c10::Dispatcher::singleton()
8582 .findSchemaOrThrow(__rshift___Scalar::name, __rshift___Scalar::overload_name)
8583 .typed<__rshift___Scalar::schema>();
8584}
8585
8586// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
8587at::Tensor __rshift___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
8588
8589 static auto op = create___rshift___Scalar_typed_handle();
8590 return op.call(self, other);
8591}
8592
8593// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
8594at::Tensor __rshift___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
8595
8596 static auto op = create___rshift___Scalar_typed_handle();
8597 return op.redispatch(dispatchKeySet, self, other);
8598}
8599
8600STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Tensor, name, "aten::__rshift__")
8601STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Tensor, overload_name, "Tensor")
8602STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Tensor, schema_str, "__rshift__.Tensor(Tensor self, Tensor other) -> Tensor")
8603
8604// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
8605static C10_NOINLINE c10::TypedOperatorHandle<__rshift___Tensor::schema> create___rshift___Tensor_typed_handle() {
8606 return c10::Dispatcher::singleton()
8607 .findSchemaOrThrow(__rshift___Tensor::name, __rshift___Tensor::overload_name)
8608 .typed<__rshift___Tensor::schema>();
8609}
8610
8611// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
8612at::Tensor __rshift___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
8613
8614 static auto op = create___rshift___Tensor_typed_handle();
8615 return op.call(self, other);
8616}
8617
8618// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
8619at::Tensor __rshift___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
8620
8621 static auto op = create___rshift___Tensor_typed_handle();
8622 return op.redispatch(dispatchKeySet, self, other);
8623}
8624
8625STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__irshift___Scalar, name, "aten::__irshift__")
8626STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__irshift___Scalar, overload_name, "Scalar")
8627STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__irshift___Scalar, schema_str, "__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
8628
8629// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8630static C10_NOINLINE c10::TypedOperatorHandle<__irshift___Scalar::schema> create___irshift___Scalar_typed_handle() {
8631 return c10::Dispatcher::singleton()
8632 .findSchemaOrThrow(__irshift___Scalar::name, __irshift___Scalar::overload_name)
8633 .typed<__irshift___Scalar::schema>();
8634}
8635
8636// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8637at::Tensor & __irshift___Scalar::call(at::Tensor & self, const at::Scalar & other) {
8638
8639 static auto op = create___irshift___Scalar_typed_handle();
8640 return op.call(self, other);
8641}
8642
8643// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
8644at::Tensor & __irshift___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
8645
8646 static auto op = create___irshift___Scalar_typed_handle();
8647 return op.redispatch(dispatchKeySet, self, other);
8648}
8649
8650STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__irshift___Tensor, name, "aten::__irshift__")
8651STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__irshift___Tensor, overload_name, "Tensor")
8652STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__irshift___Tensor, schema_str, "__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
8653
8654// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8655static C10_NOINLINE c10::TypedOperatorHandle<__irshift___Tensor::schema> create___irshift___Tensor_typed_handle() {
8656 return c10::Dispatcher::singleton()
8657 .findSchemaOrThrow(__irshift___Tensor::name, __irshift___Tensor::overload_name)
8658 .typed<__irshift___Tensor::schema>();
8659}
8660
8661// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8662at::Tensor & __irshift___Tensor::call(at::Tensor & self, const at::Tensor & other) {
8663
8664 static auto op = create___irshift___Tensor_typed_handle();
8665 return op.call(self, other);
8666}
8667
8668// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8669at::Tensor & __irshift___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
8670
8671 static auto op = create___irshift___Tensor_typed_handle();
8672 return op.redispatch(dispatchKeySet, self, other);
8673}
8674
8675STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor, name, "aten::bitwise_right_shift")
8676STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor, overload_name, "Tensor")
8677STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor, schema_str, "bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor")
8678
8679// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
8680static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Tensor::schema> create_bitwise_right_shift_Tensor_typed_handle() {
8681 return c10::Dispatcher::singleton()
8682 .findSchemaOrThrow(bitwise_right_shift_Tensor::name, bitwise_right_shift_Tensor::overload_name)
8683 .typed<bitwise_right_shift_Tensor::schema>();
8684}
8685
8686// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
8687at::Tensor bitwise_right_shift_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
8688
8689 static auto op = create_bitwise_right_shift_Tensor_typed_handle();
8690 return op.call(self, other);
8691}
8692
8693// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
8694at::Tensor bitwise_right_shift_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
8695
8696 static auto op = create_bitwise_right_shift_Tensor_typed_handle();
8697 return op.redispatch(dispatchKeySet, self, other);
8698}
8699
8700STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift__Tensor, name, "aten::bitwise_right_shift_")
8701STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift__Tensor, overload_name, "Tensor")
8702STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift__Tensor, schema_str, "bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
8703
8704// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8705static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift__Tensor::schema> create_bitwise_right_shift__Tensor_typed_handle() {
8706 return c10::Dispatcher::singleton()
8707 .findSchemaOrThrow(bitwise_right_shift__Tensor::name, bitwise_right_shift__Tensor::overload_name)
8708 .typed<bitwise_right_shift__Tensor::schema>();
8709}
8710
8711// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8712at::Tensor & bitwise_right_shift__Tensor::call(at::Tensor & self, const at::Tensor & other) {
8713
8714 static auto op = create_bitwise_right_shift__Tensor_typed_handle();
8715 return op.call(self, other);
8716}
8717
8718// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
8719at::Tensor & bitwise_right_shift__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
8720
8721 static auto op = create_bitwise_right_shift__Tensor_typed_handle();
8722 return op.redispatch(dispatchKeySet, self, other);
8723}
8724
8725STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_out, name, "aten::bitwise_right_shift")
8726STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_out, overload_name, "Tensor_out")
8727STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_out, schema_str, "bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
8728
8729// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8730static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Tensor_out::schema> create_bitwise_right_shift_Tensor_out_typed_handle() {
8731 return c10::Dispatcher::singleton()
8732 .findSchemaOrThrow(bitwise_right_shift_Tensor_out::name, bitwise_right_shift_Tensor_out::overload_name)
8733 .typed<bitwise_right_shift_Tensor_out::schema>();
8734}
8735
8736// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8737at::Tensor & bitwise_right_shift_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
8738
8739 static auto op = create_bitwise_right_shift_Tensor_out_typed_handle();
8740 return op.call(self, other, out);
8741}
8742
8743// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8744at::Tensor & bitwise_right_shift_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
8745
8746 static auto op = create_bitwise_right_shift_Tensor_out_typed_handle();
8747 return op.redispatch(dispatchKeySet, self, other, out);
8748}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_Scalar, name, "aten::bitwise_right_shift")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_Scalar, overload_name, "Tensor_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_Scalar, schema_str, "bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor")

// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Tensor_Scalar::schema> create_bitwise_right_shift_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Tensor_Scalar::name, bitwise_right_shift_Tensor_Scalar::overload_name)
      .typed<bitwise_right_shift_Tensor_Scalar::schema>();
}

// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_right_shift_Tensor_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_bitwise_right_shift_Tensor_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_right_shift_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_bitwise_right_shift_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift__Tensor_Scalar, name, "aten::bitwise_right_shift_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift__Tensor_Scalar, overload_name, "Tensor_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift__Tensor_Scalar, schema_str, "bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift__Tensor_Scalar::schema> create_bitwise_right_shift__Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift__Tensor_Scalar::name, bitwise_right_shift__Tensor_Scalar::overload_name)
      .typed<bitwise_right_shift__Tensor_Scalar::schema>();
}

// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_right_shift__Tensor_Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_bitwise_right_shift__Tensor_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_right_shift__Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_bitwise_right_shift__Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_Scalar_out, name, "aten::bitwise_right_shift")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_Scalar_out, overload_name, "Tensor_Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Tensor_Scalar_out, schema_str, "bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Tensor_Scalar_out::schema> create_bitwise_right_shift_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Tensor_Scalar_out::name, bitwise_right_shift_Tensor_Scalar_out::overload_name)
      .typed<bitwise_right_shift_Tensor_Scalar_out::schema>();
}

// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_right_shift_Tensor_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_bitwise_right_shift_Tensor_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_right_shift_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_bitwise_right_shift_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Scalar_Tensor, name, "aten::bitwise_right_shift")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Scalar_Tensor, overload_name, "Scalar_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Scalar_Tensor, schema_str, "bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")

// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Scalar_Tensor::schema> create_bitwise_right_shift_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Scalar_Tensor::name, bitwise_right_shift_Scalar_Tensor::overload_name)
      .typed<bitwise_right_shift_Scalar_Tensor::schema>();
}

// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_right_shift_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {

    static auto op = create_bitwise_right_shift_Scalar_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_right_shift_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {

    static auto op = create_bitwise_right_shift_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential_, name, "aten::exponential_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential_, schema_str, "exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)")

// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<exponential_::schema> create_exponential__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exponential_::name, exponential_::overload_name)
      .typed<exponential_::schema>();
}

// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & exponential_::call(at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {

    static auto op = create_exponential__typed_handle();
    return op.call(self, lambd, generator);
}

// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & exponential_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {

    static auto op = create_exponential__typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric_, name, "aten::geometric_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric_, schema_str, "geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)")

// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<geometric_::schema> create_geometric__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(geometric_::name, geometric_::overload_name)
      .typed<geometric_::schema>();
}

// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & geometric_::call(at::Tensor & self, double p, c10::optional<at::Generator> generator) {

    static auto op = create_geometric__typed_handle();
    return op.call(self, p, generator);
}

// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & geometric_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, c10::optional<at::Generator> generator) {

    static auto op = create_geometric__typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator);
}
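
// [Editorial note, not generated] exponential_ and geometric_ above are in-place
// sampling ops: the Tensor(a!) annotation marks `self` as mutated and returned.
// A hedged usage sketch (inputs here are illustrative only):
//
//   at::Tensor t = at::empty({1024});
//   t.exponential_(/*lambd=*/2.0);   // fill with Exp(2) samples, default RNG
//   t.geometric_(/*p=*/0.5);         // overwrite with Geometric(0.5) samples
//
// Passing c10::nullopt for the generator selects the thread's default generator;
// an explicit at::Generator makes the sampling reproducible.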

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace_backward, name, "aten::trace_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(trace_backward, schema_str, "trace_backward(Tensor grad, SymInt[] sizes) -> Tensor")

// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<trace_backward::schema> create_trace_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(trace_backward::name, trace_backward::overload_name)
      .typed<trace_backward::schema>();
}

// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
at::Tensor trace_backward::call(const at::Tensor & grad, c10::SymIntArrayRef sizes) {

    static auto op = create_trace_backward_typed_handle();
    return op.call(grad, sizes);
}

// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
at::Tensor trace_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef sizes) {

    static auto op = create_trace_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, sizes);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Scalar_out, name, "aten::eq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Scalar_out, schema_str, "eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eq_Scalar_out::schema> create_eq_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq_Scalar_out::name, eq_Scalar_out::overload_name)
      .typed<eq_Scalar_out::schema>();
}

// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eq_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_eq_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eq_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_eq_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Scalar, name, "aten::eq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Scalar, schema_str, "eq.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<eq_Scalar::schema> create_eq_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq_Scalar::name, eq_Scalar::overload_name)
      .typed<eq_Scalar::schema>();
}

// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor eq_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_eq_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor eq_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_eq_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Tensor_out, name, "aten::eq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Tensor_out, schema_str, "eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eq_Tensor_out::schema> create_eq_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq_Tensor_out::name, eq_Tensor_out::overload_name)
      .typed<eq_Tensor_out::schema>();
}

// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eq_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_eq_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eq_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_eq_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Tensor, name, "aten::eq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(eq_Tensor, schema_str, "eq.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<eq_Tensor::schema> create_eq_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq_Tensor::name, eq_Tensor::overload_name)
      .typed<eq_Tensor::schema>();
}

// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor eq_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_eq_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor eq_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_eq_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}
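
// [Editorial note, not generated] eq is registered four ways above: Scalar and
// Tensor functional overloads plus their out= variants, distinguished only by
// overload_name at schema lookup time. Illustrative sketch (inputs hypothetical):
//
//   at::Tensor x  = at::arange(4);
//   at::Tensor m1 = at::eq(x, 2);              // eq.Scalar -> eq_Scalar::call
//   at::Tensor m2 = at::eq(x, at::ones({4}));  // eq.Tensor -> eq_Tensor::call
//
// C++ overload resolution picks the wrapper at compile time; the dispatcher only
// ever sees the (name, overload_name) pair each wrapper was generated with.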

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Scalar_out, name, "aten::le")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Scalar_out, schema_str, "le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<le_Scalar_out::schema> create_le_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(le_Scalar_out::name, le_Scalar_out::overload_name)
      .typed<le_Scalar_out::schema>();
}

// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & le_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_le_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & le_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_le_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Scalar, name, "aten::le")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Scalar, schema_str, "le.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<le_Scalar::schema> create_le_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(le_Scalar::name, le_Scalar::overload_name)
      .typed<le_Scalar::schema>();
}

// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor le_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_le_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor le_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_le_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Tensor_out, name, "aten::le")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Tensor_out, schema_str, "le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<le_Tensor_out::schema> create_le_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(le_Tensor_out::name, le_Tensor_out::overload_name)
      .typed<le_Tensor_out::schema>();
}

// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & le_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_le_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & le_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_le_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Tensor, name, "aten::le")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le_Tensor, schema_str, "le.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<le_Tensor::schema> create_le_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(le_Tensor::name, le_Tensor::overload_name)
      .typed<le_Tensor::schema>();
}

// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor le_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_le_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor le_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_le_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le__Scalar, name, "aten::le_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le__Scalar, schema_str, "le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<le__Scalar::schema> create_le__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(le__Scalar::name, le__Scalar::overload_name)
      .typed<le__Scalar::schema>();
}

// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & le__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_le__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & le__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_le__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le__Tensor, name, "aten::le_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(le__Tensor, schema_str, "le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<le__Tensor::schema> create_le__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(le__Tensor::name, le__Tensor::overload_name)
      .typed<le__Tensor::schema>();
}

// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & le__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_le__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & le__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_le__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_along_dim_out, name, "aten::take_along_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_along_dim_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_along_dim_out, schema_str, "take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<take_along_dim_out::schema> create_take_along_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(take_along_dim_out::name, take_along_dim_out::overload_name)
      .typed<take_along_dim_out::schema>();
}

// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & take_along_dim_out::call(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) {

    static auto op = create_take_along_dim_out_typed_handle();
    return op.call(self, indices, dim, out);
}

// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & take_along_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) {

    static auto op = create_take_along_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_along_dim, name, "aten::take_along_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_along_dim, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(take_along_dim, schema_str, "take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor")

// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<take_along_dim::schema> create_take_along_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(take_along_dim::name, take_along_dim::overload_name)
      .typed<take_along_dim::schema>();
}

// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
at::Tensor take_along_dim::call(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {

    static auto op = create_take_along_dim_typed_handle();
    return op.call(self, indices, dim);
}

// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
at::Tensor take_along_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {

    static auto op = create_take_along_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_out, name, "aten::index_select")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_out, schema_str, "index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)")

// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_select_out::schema> create_index_select_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select_out::name, index_select_out::overload_name)
      .typed<index_select_out::schema>();
}

// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_select_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {

    static auto op = create_index_select_out_typed_handle();
    return op.call(self, dim, index, out);
}

// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_select_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {

    static auto op = create_index_select_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select, name, "aten::index_select")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select, schema_str, "index_select(Tensor self, int dim, Tensor index) -> Tensor")

// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_select::schema> create_index_select_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select::name, index_select::overload_name)
      .typed<index_select::schema>();
}

// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
at::Tensor index_select::call(const at::Tensor & self, int64_t dim, const at::Tensor & index) {

    static auto op = create_index_select_typed_handle();
    return op.call(self, dim, index);
}

// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
at::Tensor index_select::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index) {

    static auto op = create_index_select_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_dimname_out, name, "aten::index_select")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_dimname_out, schema_str, "index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)")

// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_select_dimname_out::schema> create_index_select_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select_dimname_out::name, index_select_dimname_out::overload_name)
      .typed<index_select_dimname_out::schema>();
}

// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_select_dimname_out::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {

    static auto op = create_index_select_dimname_out_typed_handle();
    return op.call(self, dim, index, out);
}

// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_select_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {

    static auto op = create_index_select_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_dimname, name, "aten::index_select")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_select_dimname, schema_str, "index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor")

// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_select_dimname::schema> create_index_select_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select_dimname::name, index_select_dimname::overload_name)
      .typed<index_select_dimname::schema>();
}

// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
at::Tensor index_select_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {

    static auto op = create_index_select_dimname_typed_handle();
    return op.call(self, dim, index);
}

// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
at::Tensor index_select_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {

    static auto op = create_index_select_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}
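
// [Editorial note, not generated] index_select above comes in int-dim and
// Dimname-dim flavors, each with an out= variant. A hedged sketch (hypothetical
// inputs; `index` must be a Long tensor):
//
//   at::Tensor src   = at::rand({3, 4});
//   at::Tensor index = at::tensor({0, 2}, at::kLong);
//   at::Tensor rows  = at::index_select(src, /*dim=*/0, index);  // shape {2, 4}
//
// The dimname overloads resolve the named dimension to its positional index, so
// both paths end up on the same underlying kernels.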

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_select_backward, name, "aten::masked_select_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_select_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(masked_select_backward, schema_str, "masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor")

// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<masked_select_backward::schema> create_masked_select_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_select_backward::name, masked_select_backward::overload_name)
      .typed<masked_select_backward::schema>();
}

// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
at::Tensor masked_select_backward::call(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {

    static auto op = create_masked_select_backward_typed_handle();
    return op.call(grad, input, mask);
}

// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
at::Tensor masked_select_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {

    static auto op = create_masked_select_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input, mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero_out, name, "aten::nonzero")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero_out, schema_str, "nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nonzero_out::schema> create_nonzero_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero_out::name, nonzero_out::overload_name)
      .typed<nonzero_out::schema>();
}

// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nonzero_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_nonzero_out_typed_handle();
    return op.call(self, out);
}

// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nonzero_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_nonzero_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero, name, "aten::nonzero")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero, schema_str, "nonzero(Tensor self) -> Tensor")

// aten::nonzero(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nonzero::schema> create_nonzero_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero::name, nonzero::overload_name)
      .typed<nonzero::schema>();
}

// aten::nonzero(Tensor self) -> Tensor
at::Tensor nonzero::call(const at::Tensor & self) {

    static auto op = create_nonzero_typed_handle();
    return op.call(self);
}

// aten::nonzero(Tensor self) -> Tensor
at::Tensor nonzero::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_nonzero_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero_numpy, name, "aten::nonzero_numpy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero_numpy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nonzero_numpy, schema_str, "nonzero_numpy(Tensor self) -> Tensor[]")

// aten::nonzero_numpy(Tensor self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<nonzero_numpy::schema> create_nonzero_numpy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero_numpy::name, nonzero_numpy::overload_name)
      .typed<nonzero_numpy::schema>();
}

// aten::nonzero_numpy(Tensor self) -> Tensor[]
::std::vector<at::Tensor> nonzero_numpy::call(const at::Tensor & self) {

    static auto op = create_nonzero_numpy_typed_handle();
    return op.call(self);
}

// aten::nonzero_numpy(Tensor self) -> Tensor[]
::std::vector<at::Tensor> nonzero_numpy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_nonzero_numpy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul_out, name, "aten::addcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul_out, schema_str, "addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)")

// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addcmul_out::schema> create_addcmul_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcmul_out::name, addcmul_out::overload_name)
      .typed<addcmul_out::schema>();
}

// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addcmul_out::call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {

    static auto op = create_addcmul_out_typed_handle();
    return op.call(self, tensor1, tensor2, value, out);
}

// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addcmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {

    static auto op = create_addcmul_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul, name, "aten::addcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul, schema_str, "addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor")

// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<addcmul::schema> create_addcmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcmul::name, addcmul::overload_name)
      .typed<addcmul::schema>();
}

// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
at::Tensor addcmul::call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {

    static auto op = create_addcmul_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
at::Tensor addcmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {

    static auto op = create_addcmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul_, name, "aten::addcmul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addcmul_, schema_str, "addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)")

// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addcmul_::schema> create_addcmul__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcmul_::name, addcmul_::overload_name)
      .typed<addcmul_::schema>();
}

// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
at::Tensor & addcmul_::call(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {

    static auto op = create_addcmul__typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
at::Tensor & addcmul_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {

    static auto op = create_addcmul__typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}
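
// [Editorial note, not generated] addcmul shows the usual functional / out= /
// in-place trio; all three compute self + value * tensor1 * tensor2 elementwise.
// Hedged sketch (hypothetical inputs):
//
//   at::Tensor s = at::zeros({2});
//   at::Tensor r = at::addcmul(s, at::ones({2}), at::full({2}, 3.0), 2.0);
//   // r == 0 + 2 * 1 * 3 == 6 elementwise; s is untouched
//   s.addcmul_(at::ones({2}), at::full({2}, 3.0), 2.0);  // mutates s instead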

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapdims, name, "aten::swapdims")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapdims, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapdims, schema_str, "swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)")

// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<swapdims::schema> create_swapdims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(swapdims::name, swapdims::overload_name)
      .typed<swapdims::schema>();
}

// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
at::Tensor swapdims::call(const at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create_swapdims_typed_handle();
    return op.call(self, dim0, dim1);
}

// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
at::Tensor swapdims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create_swapdims_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim0, dim1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapdims_, name, "aten::swapdims_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapdims_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapdims_, schema_str, "swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)")

// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<swapdims_::schema> create_swapdims__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(swapdims_::name, swapdims_::overload_name)
      .typed<swapdims_::schema>();
}

// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
at::Tensor & swapdims_::call(at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create_swapdims__typed_handle();
    return op.call(self, dim0, dim1);
}

// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
at::Tensor & swapdims_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create_swapdims__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim0, dim1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_out, name, "aten::cholesky")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_out, schema_str, "cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_out::schema> create_cholesky_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cholesky_out::name, cholesky_out::overload_name)
      .typed<cholesky_out::schema>();
}

// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_out::call(const at::Tensor & self, bool upper, at::Tensor & out) {

    static auto op = create_cholesky_out_typed_handle();
    return op.call(self, upper, out);
}

// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {

    static auto op = create_cholesky_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, upper, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky, name, "aten::cholesky")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky, schema_str, "cholesky(Tensor self, bool upper=False) -> Tensor")

// aten::cholesky(Tensor self, bool upper=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cholesky::schema> create_cholesky_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cholesky::name, cholesky::overload_name)
      .typed<cholesky::schema>();
}

// aten::cholesky(Tensor self, bool upper=False) -> Tensor
at::Tensor cholesky::call(const at::Tensor & self, bool upper) {

    static auto op = create_cholesky_typed_handle();
    return op.call(self, upper);
}

// aten::cholesky(Tensor self, bool upper=False) -> Tensor
at::Tensor cholesky::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper) {

    static auto op = create_cholesky_typed_handle();
    return op.redispatch(dispatchKeySet, self, upper);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_solve_out, name, "aten::lu_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_solve_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_solve_out, schema_str, "lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)")

// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lu_solve_out::schema> create_lu_solve_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lu_solve_out::name, lu_solve_out::overload_name)
      .typed<lu_solve_out::schema>();
}

// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lu_solve_out::call(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {

    static auto op = create_lu_solve_out_typed_handle();
    return op.call(self, LU_data, LU_pivots, out);
}

// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lu_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {

    static auto op = create_lu_solve_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, LU_data, LU_pivots, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_solve, name, "aten::lu_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_solve, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_solve, schema_str, "lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor")

// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lu_solve::schema> create_lu_solve_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lu_solve::name, lu_solve::overload_name)
      .typed<lu_solve::schema>();
}

// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
at::Tensor lu_solve::call(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {

    static auto op = create_lu_solve_typed_handle();
    return op.call(self, LU_data, LU_pivots);
}

// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
at::Tensor lu_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {

    static auto op = create_lu_solve_typed_handle();
    return op.redispatch(dispatchKeySet, self, LU_data, LU_pivots);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_unpack, name, "aten::lu_unpack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_unpack, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_unpack, schema_str, "lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)")

// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
static C10_NOINLINE c10::TypedOperatorHandle<lu_unpack::schema> create_lu_unpack_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lu_unpack::name, lu_unpack::overload_name)
      .typed<lu_unpack::schema>();
}

// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack::call(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {

    static auto op = create_lu_unpack_typed_handle();
    return op.call(LU_data, LU_pivots, unpack_data, unpack_pivots);
}

// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {

    static auto op = create_lu_unpack_typed_handle();
    return op.redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_unpack_out, name, "aten::lu_unpack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_unpack_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lu_unpack_out, schema_str, "lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)")

// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
static C10_NOINLINE c10::TypedOperatorHandle<lu_unpack_out::schema> create_lu_unpack_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lu_unpack_out::name, lu_unpack_out::overload_name)
      .typed<lu_unpack_out::schema>();
}

// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out::call(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {

    static auto op = create_lu_unpack_out_typed_handle();
    return op.call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
}

// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {

    static auto op = create_lu_unpack_out_typed_handle();
    return op.redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
}
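
// [Editorial note, not generated] lu_unpack returns a (P, L, U) tuple, and the
// out= variant threads three mutable references through the same dispatcher
// slot. Hedged sketch, assuming the factorization comes from
// at::linalg_lu_factor (inputs hypothetical):
//
//   at::Tensor A = at::rand({4, 4});
//   auto [LU, pivots] = at::linalg_lu_factor(A);
//   auto [P, L, U]    = at::lu_unpack(LU, pivots);
//   // P.matmul(L).matmul(U) reconstructs A up to numerical error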
9699
9700STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multinomial_out, name, "aten::multinomial")
9701STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multinomial_out, overload_name, "out")
9702STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multinomial_out, schema_str, "multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
9703
9704// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
9705static C10_NOINLINE c10::TypedOperatorHandle<multinomial_out::schema> create_multinomial_out_typed_handle() {
9706 return c10::Dispatcher::singleton()
9707 .findSchemaOrThrow(multinomial_out::name, multinomial_out::overload_name)
9708 .typed<multinomial_out::schema>();
9709}
9710
9711// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
9712at::Tensor & multinomial_out::call(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out) {
9713
9714 static auto op = create_multinomial_out_typed_handle();
9715 return op.call(self, num_samples, replacement, generator, out);
9716}
9717
9718// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
9719at::Tensor & multinomial_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out) {
9720
9721 static auto op = create_multinomial_out_typed_handle();
9722 return op.redispatch(dispatchKeySet, self, num_samples, replacement, generator, out);
9723}
9724
9725STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multinomial, name, "aten::multinomial")
9726STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multinomial, overload_name, "")
9727STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multinomial, schema_str, "multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor")
9728
9729// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
9730static C10_NOINLINE c10::TypedOperatorHandle<multinomial::schema> create_multinomial_typed_handle() {
9731 return c10::Dispatcher::singleton()
9732 .findSchemaOrThrow(multinomial::name, multinomial::overload_name)
9733 .typed<multinomial::schema>();
9734}
9735
9736// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
9737at::Tensor multinomial::call(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
9738
9739 static auto op = create_multinomial_typed_handle();
9740 return op.call(self, num_samples, replacement, generator);
9741}
9742
9743// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
9744at::Tensor multinomial::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
9745
9746 static auto op = create_multinomial_typed_handle();
9747 return op.redispatch(dispatchKeySet, self, num_samples, replacement, generator);
9748}
9749
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma_out, name, "aten::lgamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma_out, schema_str, "lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lgamma_out::schema> create_lgamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lgamma_out::name, lgamma_out::overload_name)
      .typed<lgamma_out::schema>();
}

// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lgamma_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_lgamma_out_typed_handle();
    return op.call(self, out);
}

// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lgamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_lgamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma_, name, "aten::lgamma_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma_, schema_str, "lgamma_(Tensor(a!) self) -> Tensor(a!)")

// aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lgamma_::schema> create_lgamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lgamma_::name, lgamma_::overload_name)
      .typed<lgamma_::schema>();
}

// aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & lgamma_::call(at::Tensor & self) {
    static auto op = create_lgamma__typed_handle();
    return op.call(self);
}

// aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & lgamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_lgamma__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma, name, "aten::lgamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lgamma, schema_str, "lgamma(Tensor self) -> Tensor")

// aten::lgamma(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lgamma::schema> create_lgamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lgamma::name, lgamma::overload_name)
      .typed<lgamma::schema>();
}

// aten::lgamma(Tensor self) -> Tensor
at::Tensor lgamma::call(const at::Tensor & self) {
    static auto op = create_lgamma_typed_handle();
    return op.call(self);
}

// aten::lgamma(Tensor self) -> Tensor
at::Tensor lgamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_lgamma_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

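// [Editorial example -- not torchgen output] The three lgamma entries above
// are the usual functional / out= / in-place trio generated for one ATen op.
// A minimal sketch, assuming at::_ops from Operators.h:
//
//   at::Tensor x = at::rand({4}) + 0.1;
//   at::Tensor y = at::_ops::lgamma::call(x);   // functional
//   at::Tensor out = at::empty_like(x);
//   at::_ops::lgamma_out::call(x, out);         // writes into `out`
//   at::_ops::lgamma_::call(x);                 // mutates `x` in place
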
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2, name, "aten::arctan2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2, schema_str, "arctan2(Tensor self, Tensor other) -> Tensor")

// aten::arctan2(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arctan2::schema> create_arctan2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctan2::name, arctan2::overload_name)
      .typed<arctan2::schema>();
}

// aten::arctan2(Tensor self, Tensor other) -> Tensor
at::Tensor arctan2::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_arctan2_typed_handle();
    return op.call(self, other);
}

// aten::arctan2(Tensor self, Tensor other) -> Tensor
at::Tensor arctan2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_arctan2_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2_out, name, "aten::arctan2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2_out, schema_str, "arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctan2_out::schema> create_arctan2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctan2_out::name, arctan2_out::overload_name)
      .typed<arctan2_out::schema>();
}

// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctan2_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_arctan2_out_typed_handle();
    return op.call(self, other, out);
}

// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctan2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_arctan2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2_, name, "aten::arctan2_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan2_, schema_str, "arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctan2_::schema> create_arctan2__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctan2_::name, arctan2_::overload_name)
      .typed<arctan2_::schema>();
}

// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & arctan2_::call(at::Tensor & self, const at::Tensor & other) {
    static auto op = create_arctan2__typed_handle();
    return op.call(self, other);
}

// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & arctan2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto op = create_arctan2__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

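// [Editorial example -- not torchgen output] arctan2 is the NumPy-style
// alias of aten::atan2, so the family above mirrors the atan2 wrappers.
// A hedged sketch; note the in-place variant requires a mutable `self`:
//
//   at::Tensor y = at::rand({3});
//   at::Tensor x = at::rand({3});
//   at::Tensor angle = at::_ops::arctan2::call(y, x);  // like std::atan2(y, x)
//   at::_ops::arctan2_::call(y, x);                    // y = arctan2(y, x)
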
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bins_tensor_out, name, "aten::histogram")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bins_tensor_out, overload_name, "bins_tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bins_tensor_out, schema_str, "histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)")

// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogram_bins_tensor_out::schema> create_histogram_bins_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histogram_bins_tensor_out::name, histogram_bins_tensor_out::overload_name)
      .typed<histogram_bins_tensor_out::schema>();
}

// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
::std::tuple<at::Tensor &,at::Tensor &> histogram_bins_tensor_out::call(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
    static auto op = create_histogram_bins_tensor_out_typed_handle();
    return op.call(self, bins, weight, density, hist, bin_edges);
}

// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
::std::tuple<at::Tensor &,at::Tensor &> histogram_bins_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
    static auto op = create_histogram_bins_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, weight, density, hist, bin_edges);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bins_tensor, name, "aten::histogram")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bins_tensor, overload_name, "bins_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bins_tensor, schema_str, "histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)")

// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogram_bins_tensor::schema> create_histogram_bins_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histogram_bins_tensor::name, histogram_bins_tensor::overload_name)
      .typed<histogram_bins_tensor::schema>();
}

// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor::call(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
    static auto op = create_histogram_bins_tensor_typed_handle();
    return op.call(self, bins, weight, density);
}

// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
    static auto op = create_histogram_bins_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, weight, density);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bin_ct_out, name, "aten::histogram")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bin_ct_out, overload_name, "bin_ct_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bin_ct_out, schema_str, "histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)")

// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogram_bin_ct_out::schema> create_histogram_bin_ct_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histogram_bin_ct_out::name, histogram_bin_ct_out::overload_name)
      .typed<histogram_bin_ct_out::schema>();
}

// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
::std::tuple<at::Tensor &,at::Tensor &> histogram_bin_ct_out::call(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
    static auto op = create_histogram_bin_ct_out_typed_handle();
    return op.call(self, bins, range, weight, density, hist, bin_edges);
}

// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
::std::tuple<at::Tensor &,at::Tensor &> histogram_bin_ct_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
    static auto op = create_histogram_bin_ct_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density, hist, bin_edges);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bin_ct, name, "aten::histogram")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bin_ct, overload_name, "bin_ct")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histogram_bin_ct, schema_str, "histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)")

// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogram_bin_ct::schema> create_histogram_bin_ct_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histogram_bin_ct::name, histogram_bin_ct::overload_name)
      .typed<histogram_bin_ct::schema>();
}

// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct::call(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    static auto op = create_histogram_bin_ct_typed_handle();
    return op.call(self, bins, range, weight, density);
}

// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    static auto op = create_histogram_bin_ct_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

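// [Editorial example -- not torchgen output] The four histogram entries are
// two overloads (bins given as a tensor of edges vs. a bin count with an
// optional range) times the functional/out= split. A hedged sketch of the
// bin_ct overload; constructing the optional range from a std::array is an
// assumption about typical call sites, not something this file prescribes:
//
//   at::Tensor samples = at::randn({1000});
//   std::array<double, 2> range = {-3.0, 3.0};
//   auto result = at::_ops::histogram_bin_ct::call(
//       samples, /*bins=*/50, at::ArrayRef<double>(range),
//       /*weight=*/c10::nullopt, /*density=*/true);
//   at::Tensor hist = std::get<0>(result);
//   at::Tensor edges = std::get<1>(result);   // 51 edges for 50 bins
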
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma_out, name, "aten::igamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma_out, schema_str, "igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<igamma_out::schema> create_igamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(igamma_out::name, igamma_out::overload_name)
      .typed<igamma_out::schema>();
}

// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & igamma_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_igamma_out_typed_handle();
    return op.call(self, other, out);
}

// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & igamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_igamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma, name, "aten::igamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma, schema_str, "igamma(Tensor self, Tensor other) -> Tensor")

// aten::igamma(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<igamma::schema> create_igamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(igamma::name, igamma::overload_name)
      .typed<igamma::schema>();
}

// aten::igamma(Tensor self, Tensor other) -> Tensor
at::Tensor igamma::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_igamma_typed_handle();
    return op.call(self, other);
}

// aten::igamma(Tensor self, Tensor other) -> Tensor
at::Tensor igamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_igamma_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma_, name, "aten::igamma_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(igamma_, schema_str, "igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<igamma_::schema> create_igamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(igamma_::name, igamma_::overload_name)
      .typed<igamma_::schema>();
}

// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & igamma_::call(at::Tensor & self, const at::Tensor & other) {
    static auto op = create_igamma__typed_handle();
    return op.call(self, other);
}

// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & igamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto op = create_igamma__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

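// [Editorial example -- not torchgen output] igamma computes the regularized
// lower incomplete gamma function P(self, other), elementwise with
// broadcasting. A minimal sketch:
//
//   at::Tensor a = at::rand({4}) + 0.5;   // shape parameters (must be > 0)
//   at::Tensor x = at::rand({4});
//   at::Tensor p = at::_ops::igamma::call(a, x);
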
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max, name, "aten::max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max, schema_str, "max(Tensor self) -> Tensor")

// aten::max(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max::schema> create_max_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max::name, max::overload_name)
      .typed<max::schema>();
}

// aten::max(Tensor self) -> Tensor
at::Tensor max::call(const at::Tensor & self) {
    static auto op = create_max_typed_handle();
    return op.call(self);
}

// aten::max(Tensor self) -> Tensor
at::Tensor max::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_max_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_other, name, "aten::max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_other, overload_name, "other")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_other, schema_str, "max.other(Tensor self, Tensor other) -> Tensor")

// aten::max.other(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max_other::schema> create_max_other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_other::name, max_other::overload_name)
      .typed<max_other::schema>();
}

// aten::max.other(Tensor self, Tensor other) -> Tensor
at::Tensor max_other::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_max_other_typed_handle();
    return op.call(self, other);
}

// aten::max.other(Tensor self, Tensor other) -> Tensor
at::Tensor max_other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_max_other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_out, name, "aten::max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_out, schema_str, "max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<max_out::schema> create_max_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_out::name, max_out::overload_name)
      .typed<max_out::schema>();
}

// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_max_out_typed_handle();
    return op.call(self, other, out);
}

// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_max_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unary_out, name, "aten::max")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unary_out, overload_name, "unary_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unary_out, schema_str, "max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<max_unary_out::schema> create_max_unary_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unary_out::name, max_unary_out::overload_name)
      .typed<max_unary_out::schema>();
}

// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_unary_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_max_unary_out_typed_handle();
    return op.call(self, out);
}

// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_unary_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_max_unary_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

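// [Editorial example -- not torchgen output] The overload names are what keep
// these colliding C++ signatures apart in the registry: max (full reduction
// to a 0-dim tensor), max.other (elementwise, equivalent to at::maximum),
// max.out (elementwise out=), and max.unary_out (reduction out=). Sketch:
//
//   at::Tensor a = at::rand({3, 3});
//   at::Tensor b = at::rand({3, 3});
//   at::Tensor m  = at::_ops::max::call(a);           // scalar (0-dim) result
//   at::Tensor mw = at::_ops::max_other::call(a, b);  // elementwise max
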
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Tensor_out, name, "aten::pow")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Tensor_out, overload_name, "Tensor_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Tensor_out, schema_str, "pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")

// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pow_Tensor_Tensor_out::schema> create_pow_Tensor_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Tensor_Tensor_out::name, pow_Tensor_Tensor_out::overload_name)
      .typed<pow_Tensor_Tensor_out::schema>();
}

// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pow_Tensor_Tensor_out::call(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    static auto op = create_pow_Tensor_Tensor_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pow_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    static auto op = create_pow_Tensor_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Tensor, name, "aten::pow")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Tensor, overload_name, "Tensor_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Tensor, schema_str, "pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor")

// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pow_Tensor_Tensor::schema> create_pow_Tensor_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Tensor_Tensor::name, pow_Tensor_Tensor::overload_name)
      .typed<pow_Tensor_Tensor::schema>();
}

// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
at::Tensor pow_Tensor_Tensor::call(const at::Tensor & self, const at::Tensor & exponent) {
    static auto op = create_pow_Tensor_Tensor_typed_handle();
    return op.call(self, exponent);
}

// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
at::Tensor pow_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent) {
    static auto op = create_pow_Tensor_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Scalar_out, name, "aten::pow")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Scalar_out, schema_str, "pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")

// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pow_Scalar_out::schema> create_pow_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Scalar_out::name, pow_Scalar_out::overload_name)
      .typed<pow_Scalar_out::schema>();
}

// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pow_Scalar_out::call(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    static auto op = create_pow_Scalar_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pow_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    static auto op = create_pow_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Scalar, name, "aten::pow")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Scalar, schema_str, "pow.Scalar(Scalar self, Tensor exponent) -> Tensor")

// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pow_Scalar::schema> create_pow_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Scalar::name, pow_Scalar::overload_name)
      .typed<pow_Scalar::schema>();
}

// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor
at::Tensor pow_Scalar::call(const at::Scalar & self, const at::Tensor & exponent) {
    static auto op = create_pow_Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor
at::Tensor pow_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent) {
    static auto op = create_pow_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Scalar_out, name, "aten::pow")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Scalar_out, overload_name, "Tensor_Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Scalar_out, schema_str, "pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)")

// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pow_Tensor_Scalar_out::schema> create_pow_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Tensor_Scalar_out::name, pow_Tensor_Scalar_out::overload_name)
      .typed<pow_Tensor_Scalar_out::schema>();
}

// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pow_Tensor_Scalar_out::call(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    static auto op = create_pow_Tensor_Scalar_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pow_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    static auto op = create_pow_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Scalar, name, "aten::pow")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Scalar, overload_name, "Tensor_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow_Tensor_Scalar, schema_str, "pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor")

// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pow_Tensor_Scalar::schema> create_pow_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Tensor_Scalar::name, pow_Tensor_Scalar::overload_name)
      .typed<pow_Tensor_Scalar::schema>();
}

// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
at::Tensor pow_Tensor_Scalar::call(const at::Tensor & self, const at::Scalar & exponent) {
    static auto op = create_pow_Tensor_Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
at::Tensor pow_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent) {
    static auto op = create_pow_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow__Scalar, name, "aten::pow_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow__Scalar, schema_str, "pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)")

// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pow__Scalar::schema> create_pow__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow__Scalar::name, pow__Scalar::overload_name)
      .typed<pow__Scalar::schema>();
}

// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
at::Tensor & pow__Scalar::call(at::Tensor & self, const at::Scalar & exponent) {
    static auto op = create_pow__Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
at::Tensor & pow__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) {
    static auto op = create_pow__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow__Tensor, name, "aten::pow_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pow__Tensor, schema_str, "pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)")

// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pow__Tensor::schema> create_pow__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow__Tensor::name, pow__Tensor::overload_name)
      .typed<pow__Tensor::schema>();
}

// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
at::Tensor & pow__Tensor::call(at::Tensor & self, const at::Tensor & exponent) {
    static auto op = create_pow__Tensor_typed_handle();
    return op.call(self, exponent);
}

// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
at::Tensor & pow__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) {
    static auto op = create_pow__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

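// [Editorial example -- not torchgen output] pow is generated for every
// Scalar/Tensor combination of base and exponent; a single torch.pow call in
// Python routes to one of these overloads by argument type. Sketch:
//
//   at::Tensor t = at::rand({4});
//   at::Tensor e = at::rand({4});
//   at::Tensor r1 = at::_ops::pow_Tensor_Tensor::call(t, e);
//   at::Tensor r2 = at::_ops::pow_Tensor_Scalar::call(t, 2.0);
//   at::Tensor r3 = at::_ops::pow_Scalar::call(2.0, e);  // 2 ** e, elementwise
//   at::_ops::pow__Scalar::call(t, 3.0);                 // t = t ** 3, in place
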
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale_, name, "aten::_amp_foreach_non_finite_check_and_unscale_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale_, schema_str, "_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()")

// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_amp_foreach_non_finite_check_and_unscale_::schema> create__amp_foreach_non_finite_check_and_unscale__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_amp_foreach_non_finite_check_and_unscale_::name, _amp_foreach_non_finite_check_and_unscale_::overload_name)
      .typed<_amp_foreach_non_finite_check_and_unscale_::schema>();
}

// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
void _amp_foreach_non_finite_check_and_unscale_::call(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
    static auto op = create__amp_foreach_non_finite_check_and_unscale__typed_handle();
    return op.call(self, found_inf, inv_scale);
}

// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
void _amp_foreach_non_finite_check_and_unscale_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
    static auto op = create__amp_foreach_non_finite_check_and_unscale__typed_handle();
    return op.redispatch(dispatchKeySet, self, found_inf, inv_scale);
}

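// [Editorial example -- not torchgen output] This fused op backs gradient
// unscaling in AMP (torch.cuda.amp.GradScaler.unscale_): it multiplies each
// gradient in `self` by `inv_scale` and flags `found_inf` if any element is
// inf/NaN, in one pass over the tensor list. A hedged sketch of the calling
// convention only; the tensor values here are placeholders:
//
//   std::vector<at::Tensor> grads = {at::rand({8}), at::rand({8})};
//   at::Tensor found_inf = at::zeros({1}, grads[0].options());
//   at::Tensor inv_scale = at::ones({1}, grads[0].options());
//   at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(
//       grads, found_inf, inv_scale);
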
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_Scalar, name, "aten::_foreach_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_Scalar, schema_str, "_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")

// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_Scalar::schema> create__foreach_add_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_add_Scalar::name, _foreach_add_Scalar::overload_name)
      .typed<_foreach_add_Scalar::schema>();
}

// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_add_Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_add_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__Scalar, name, "aten::_foreach_add_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__Scalar, schema_str, "_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")

// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add__Scalar::schema> create__foreach_add__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_add__Scalar::name, _foreach_add__Scalar::overload_name)
      .typed<_foreach_add__Scalar::schema>();
}

// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_add__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_add__Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_add__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_add__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_Scalar, name, "aten::_foreach_clamp_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_Scalar, schema_str, "_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")

// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_Scalar::schema> create__foreach_clamp_min_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_min_Scalar::name, _foreach_clamp_min_Scalar::overload_name)
      .typed<_foreach_clamp_min_Scalar::schema>();
}

// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_clamp_min_Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_clamp_min_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__Scalar, name, "aten::_foreach_clamp_min_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__Scalar, schema_str, "_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")

// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min__Scalar::schema> create__foreach_clamp_min__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_min__Scalar::name, _foreach_clamp_min__Scalar::overload_name)
      .typed<_foreach_clamp_min__Scalar::schema>();
}

// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_clamp_min__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_clamp_min__Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_clamp_min__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_clamp_min__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_Scalar, name, "aten::_foreach_minimum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_Scalar, schema_str, "_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")

// aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_Scalar::schema> create__foreach_minimum_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_Scalar::name, _foreach_minimum_Scalar::overload_name)
      .typed<_foreach_minimum_Scalar::schema>();
}

// aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_minimum_Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_minimum_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__Scalar, name, "aten::_foreach_minimum_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__Scalar, schema_str, "_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")

// aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum__Scalar::schema> create__foreach_minimum__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum__Scalar::name, _foreach_minimum__Scalar::overload_name)
      .typed<_foreach_minimum__Scalar::schema>();
}

// aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_minimum__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_minimum__Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_minimum__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_minimum__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

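// [Editorial example -- not torchgen output] The _foreach_* ops apply one
// operation across a whole list of tensors, potentially as a single fused
// kernel launch; optimizers use them to avoid per-parameter launch overhead.
// Sketch of the Scalar overloads above (at::TensorList is constructible from
// std::vector<at::Tensor>):
//
//   std::vector<at::Tensor> params = {at::rand({8}), at::rand({8})};
//   auto shifted = at::_ops::_foreach_add_Scalar::call(params, 1.0);
//   at::_ops::_foreach_clamp_min__Scalar::call(params, 0.0);  // in-place clamp at 0
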
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_List, name, "aten::_foreach_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_List, schema_str, "_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]")

// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_List::schema> create__foreach_add_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_add_List::name, _foreach_add_List::overload_name)
      .typed<_foreach_add_List::schema>();
}

// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_List::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    static auto op = create__foreach_add_List_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    static auto op = create__foreach_add_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__List, name, "aten::_foreach_add_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__List, schema_str, "_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()")

// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add__List::schema> create__foreach_add__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_add__List::name, _foreach_add__List::overload_name)
      .typed<_foreach_add__List::schema>();
}

// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
void _foreach_add__List::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    static auto op = create__foreach_add__List_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
void _foreach_add__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    static auto op = create__foreach_add__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_List, name, "aten::_foreach_clamp_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_List, schema_str, "_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]")

// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_List::schema> create__foreach_clamp_min_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_min_List::name, _foreach_clamp_min_List::overload_name)
      .typed<_foreach_clamp_min_List::schema>();
}

// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_clamp_min_List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_clamp_min_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__List, name, "aten::_foreach_clamp_min_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__List, schema_str, "_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()")

// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min__List::schema> create__foreach_clamp_min__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_min__List::name, _foreach_clamp_min__List::overload_name)
      .typed<_foreach_clamp_min__List::schema>();
}

// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_clamp_min__List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_clamp_min__List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_clamp_min__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_clamp_min__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_List, name, "aten::_foreach_minimum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_List, schema_str, "_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]")

// aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_List::schema> create__foreach_minimum_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_List::name, _foreach_minimum_List::overload_name)
      .typed<_foreach_minimum_List::schema>();
}

// aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_minimum_List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_minimum_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__List, name, "aten::_foreach_minimum_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__List, schema_str, "_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()")

// aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum__List::schema> create__foreach_minimum__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum__List::name, _foreach_minimum__List::overload_name)
      .typed<_foreach_minimum__List::schema>();
}

// aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_minimum__List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_minimum__List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_minimum__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_minimum__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

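// [Editorial example -- not torchgen output] The List overloads pair the i-th
// tensor of `self` with the i-th tensor of `other`; both lists must have the
// same length. _foreach_add.List also carries the usual `alpha` multiplier,
// so it computes self[i] + alpha * other[i]. Sketch:
//
//   std::vector<at::Tensor> xs = {at::rand({4}), at::rand({4})};
//   std::vector<at::Tensor> ys = {at::rand({4}), at::rand({4})};
//   auto sums = at::_ops::_foreach_add_List::call(xs, ys, /*alpha=*/0.5);
//   at::_ops::_foreach_minimum__List::call(xs, ys);  // xs[i] = min(xs[i], ys[i])
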
10700STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_ScalarList, name, "aten::_foreach_add")
10701STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_ScalarList, overload_name, "ScalarList")
10702STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_ScalarList, schema_str, "_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
10703
10704// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
10705static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_ScalarList::schema> create__foreach_add_ScalarList_typed_handle() {
10706 return c10::Dispatcher::singleton()
10707 .findSchemaOrThrow(_foreach_add_ScalarList::name, _foreach_add_ScalarList::overload_name)
10708 .typed<_foreach_add_ScalarList::schema>();
10709}
10710
10711// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
10712::std::vector<at::Tensor> _foreach_add_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
10713
10714 static auto op = create__foreach_add_ScalarList_typed_handle();
10715 return op.call(self, scalars);
10716}
10717
10718// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
10719::std::vector<at::Tensor> _foreach_add_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
10720
10721 static auto op = create__foreach_add_ScalarList_typed_handle();
10722 return op.redispatch(dispatchKeySet, self, scalars);
10723}
10724
10725STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__ScalarList, name, "aten::_foreach_add_")
10726STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__ScalarList, overload_name, "ScalarList")
10727STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add__ScalarList, schema_str, "_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
10728
10729// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
10730static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add__ScalarList::schema> create__foreach_add__ScalarList_typed_handle() {
10731 return c10::Dispatcher::singleton()
10732 .findSchemaOrThrow(_foreach_add__ScalarList::name, _foreach_add__ScalarList::overload_name)
10733 .typed<_foreach_add__ScalarList::schema>();
10734}
10735
10736// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
10737void _foreach_add__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
10738
10739 static auto op = create__foreach_add__ScalarList_typed_handle();
10740 return op.call(self, scalars);
10741}
10742
10743// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
10744void _foreach_add__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
10745
10746 static auto op = create__foreach_add__ScalarList_typed_handle();
10747 return op.redispatch(dispatchKeySet, self, scalars);
10748}
10749
10750STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_ScalarList, name, "aten::_foreach_clamp_min")
10751STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_ScalarList, overload_name, "ScalarList")
10752STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_ScalarList, schema_str, "_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
10753
10754// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
10755static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_ScalarList::schema> create__foreach_clamp_min_ScalarList_typed_handle() {
10756 return c10::Dispatcher::singleton()
10757 .findSchemaOrThrow(_foreach_clamp_min_ScalarList::name, _foreach_clamp_min_ScalarList::overload_name)
10758 .typed<_foreach_clamp_min_ScalarList::schema>();
10759}
10760
10761// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
10762::std::vector<at::Tensor> _foreach_clamp_min_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
10763
10764 static auto op = create__foreach_clamp_min_ScalarList_typed_handle();
10765 return op.call(self, scalars);
10766}
10767
10768// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
10769::std::vector<at::Tensor> _foreach_clamp_min_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
10770
10771 static auto op = create__foreach_clamp_min_ScalarList_typed_handle();
10772 return op.redispatch(dispatchKeySet, self, scalars);
10773}
10774
10775STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__ScalarList, name, "aten::_foreach_clamp_min_")
10776STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__ScalarList, overload_name, "ScalarList")
10777STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min__ScalarList, schema_str, "_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
10778
10779// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
10780static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min__ScalarList::schema> create__foreach_clamp_min__ScalarList_typed_handle() {
10781 return c10::Dispatcher::singleton()
10782 .findSchemaOrThrow(_foreach_clamp_min__ScalarList::name, _foreach_clamp_min__ScalarList::overload_name)
10783 .typed<_foreach_clamp_min__ScalarList::schema>();
10784}
10785
10786// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
10787void _foreach_clamp_min__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
10788
10789 static auto op = create__foreach_clamp_min__ScalarList_typed_handle();
10790 return op.call(self, scalars);
10791}
10792
10793// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
10794void _foreach_clamp_min__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
10795
10796 static auto op = create__foreach_clamp_min__ScalarList_typed_handle();
10797 return op.redispatch(dispatchKeySet, self, scalars);
10798}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_ScalarList, name, "aten::_foreach_minimum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_ScalarList, schema_str, "_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")

// aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_ScalarList::schema> create__foreach_minimum_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_ScalarList::name, _foreach_minimum_ScalarList::overload_name)
      .typed<_foreach_minimum_ScalarList::schema>();
}

// aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

  static auto op = create__foreach_minimum_ScalarList_typed_handle();
  return op.call(self, scalars);
}

// aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

  static auto op = create__foreach_minimum_ScalarList_typed_handle();
  return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__ScalarList, name, "aten::_foreach_minimum_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum__ScalarList, schema_str, "_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")

// aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum__ScalarList::schema> create__foreach_minimum__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum__ScalarList::name, _foreach_minimum__ScalarList::overload_name)
      .typed<_foreach_minimum__ScalarList::schema>();
}

// aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_minimum__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

  static auto op = create__foreach_minimum__ScalarList_typed_handle();
  return op.call(self, scalars);
}

// aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_minimum__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

  static auto op = create__foreach_minimum__ScalarList_typed_handle();
  return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh, name, "aten::_foreach_cosh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh, schema_str, "_foreach_cosh(Tensor[] self) -> Tensor[]")

// aten::_foreach_cosh(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cosh::schema> create__foreach_cosh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cosh::name, _foreach_cosh::overload_name)
      .typed<_foreach_cosh::schema>();
}

// aten::_foreach_cosh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_cosh::call(at::TensorList self) {

  static auto op = create__foreach_cosh_typed_handle();
  return op.call(self);
}

// aten::_foreach_cosh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_cosh::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_cosh_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh_, name, "aten::_foreach_cosh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh_, schema_str, "_foreach_cosh_(Tensor(a!)[] self) -> ()")

// aten::_foreach_cosh_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cosh_::schema> create__foreach_cosh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cosh_::name, _foreach_cosh_::overload_name)
      .typed<_foreach_cosh_::schema>();
}

// aten::_foreach_cosh_(Tensor(a!)[] self) -> ()
void _foreach_cosh_::call(at::TensorList self) {

  static auto op = create__foreach_cosh__typed_handle();
  return op.call(self);
}

// aten::_foreach_cosh_(Tensor(a!)[] self) -> ()
void _foreach_cosh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_cosh__typed_handle();
  return op.redispatch(dispatchKeySet, self);
}
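
// Example (editor's sketch): the unary foreach ops in this file (_foreach_cosh,
// _foreach_erfc, _foreach_round, _foreach_lgamma, _foreach_frac, _foreach_trunc
// below) all share one shape — a functional overload returning Tensor[] and an
// in-place `_` overload returning (). Assuming the usual generated at:: wrappers,
// with hypothetical inputs:
//
//   std::vector<at::Tensor> xs = {at::randn({2, 2}), at::randn({5})};
//   auto cs = at::_foreach_cosh(xs);  // fresh tensors, cosh applied elementwise
//   at::_foreach_cosh_(xs);           // mutates every tensor in xs in place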

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc, name, "aten::_foreach_erfc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc, schema_str, "_foreach_erfc(Tensor[] self) -> Tensor[]")

// aten::_foreach_erfc(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erfc::schema> create__foreach_erfc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erfc::name, _foreach_erfc::overload_name)
      .typed<_foreach_erfc::schema>();
}

// aten::_foreach_erfc(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_erfc::call(at::TensorList self) {

  static auto op = create__foreach_erfc_typed_handle();
  return op.call(self);
}

// aten::_foreach_erfc(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_erfc::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_erfc_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc_, name, "aten::_foreach_erfc_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc_, schema_str, "_foreach_erfc_(Tensor(a!)[] self) -> ()")

// aten::_foreach_erfc_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erfc_::schema> create__foreach_erfc__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erfc_::name, _foreach_erfc_::overload_name)
      .typed<_foreach_erfc_::schema>();
}

// aten::_foreach_erfc_(Tensor(a!)[] self) -> ()
void _foreach_erfc_::call(at::TensorList self) {

  static auto op = create__foreach_erfc__typed_handle();
  return op.call(self);
}

// aten::_foreach_erfc_(Tensor(a!)[] self) -> ()
void _foreach_erfc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_erfc__typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round, name, "aten::_foreach_round")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round, schema_str, "_foreach_round(Tensor[] self) -> Tensor[]")

// aten::_foreach_round(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_round::schema> create__foreach_round_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_round::name, _foreach_round::overload_name)
      .typed<_foreach_round::schema>();
}

// aten::_foreach_round(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_round::call(at::TensorList self) {

  static auto op = create__foreach_round_typed_handle();
  return op.call(self);
}

// aten::_foreach_round(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_round::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_round_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round_, name, "aten::_foreach_round_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round_, schema_str, "_foreach_round_(Tensor(a!)[] self) -> ()")

// aten::_foreach_round_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_round_::schema> create__foreach_round__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_round_::name, _foreach_round_::overload_name)
      .typed<_foreach_round_::schema>();
}

// aten::_foreach_round_(Tensor(a!)[] self) -> ()
void _foreach_round_::call(at::TensorList self) {

  static auto op = create__foreach_round__typed_handle();
  return op.call(self);
}

// aten::_foreach_round_(Tensor(a!)[] self) -> ()
void _foreach_round_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_round__typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma, name, "aten::_foreach_lgamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma, schema_str, "_foreach_lgamma(Tensor[] self) -> Tensor[]")

// aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lgamma::schema> create__foreach_lgamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lgamma::name, _foreach_lgamma::overload_name)
      .typed<_foreach_lgamma::schema>();
}

// aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_lgamma::call(at::TensorList self) {

  static auto op = create__foreach_lgamma_typed_handle();
  return op.call(self);
}

// aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_lgamma::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_lgamma_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma_, name, "aten::_foreach_lgamma_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma_, schema_str, "_foreach_lgamma_(Tensor(a!)[] self) -> ()")

// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lgamma_::schema> create__foreach_lgamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lgamma_::name, _foreach_lgamma_::overload_name)
      .typed<_foreach_lgamma_::schema>();
}

// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
void _foreach_lgamma_::call(at::TensorList self) {

  static auto op = create__foreach_lgamma__typed_handle();
  return op.call(self);
}

// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
void _foreach_lgamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_lgamma__typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac, name, "aten::_foreach_frac")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac, schema_str, "_foreach_frac(Tensor[] self) -> Tensor[]")

// aten::_foreach_frac(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_frac::schema> create__foreach_frac_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_frac::name, _foreach_frac::overload_name)
      .typed<_foreach_frac::schema>();
}

// aten::_foreach_frac(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_frac::call(at::TensorList self) {

  static auto op = create__foreach_frac_typed_handle();
  return op.call(self);
}

// aten::_foreach_frac(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_frac::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_frac_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac_, name, "aten::_foreach_frac_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac_, schema_str, "_foreach_frac_(Tensor(a!)[] self) -> ()")

// aten::_foreach_frac_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_frac_::schema> create__foreach_frac__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_frac_::name, _foreach_frac_::overload_name)
      .typed<_foreach_frac_::schema>();
}

// aten::_foreach_frac_(Tensor(a!)[] self) -> ()
void _foreach_frac_::call(at::TensorList self) {

  static auto op = create__foreach_frac__typed_handle();
  return op.call(self);
}

// aten::_foreach_frac_(Tensor(a!)[] self) -> ()
void _foreach_frac_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_frac__typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc, name, "aten::_foreach_trunc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc, schema_str, "_foreach_trunc(Tensor[] self) -> Tensor[]")

// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_trunc::schema> create__foreach_trunc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_trunc::name, _foreach_trunc::overload_name)
      .typed<_foreach_trunc::schema>();
}

// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_trunc::call(at::TensorList self) {

  static auto op = create__foreach_trunc_typed_handle();
  return op.call(self);
}

// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_trunc::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_trunc_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc_, name, "aten::_foreach_trunc_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc_, schema_str, "_foreach_trunc_(Tensor(a!)[] self) -> ()")

// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_trunc_::schema> create__foreach_trunc__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_trunc_::name, _foreach_trunc_::overload_name)
      .typed<_foreach_trunc_::schema>();
}

// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
void _foreach_trunc_::call(at::TensorList self) {

  static auto op = create__foreach_trunc__typed_handle();
  return op.call(self);
}

// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
void _foreach_trunc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

  static auto op = create__foreach_trunc__typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_List, name, "aten::_foreach_lerp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_List, schema_str, "_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]")

// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_List::schema> create__foreach_lerp_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_List::name, _foreach_lerp_List::overload_name)
      .typed<_foreach_lerp_List::schema>();
}

// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
::std::vector<at::Tensor> _foreach_lerp_List::call(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {

  static auto op = create__foreach_lerp_List_typed_handle();
  return op.call(self, tensors1, weights);
}

// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
::std::vector<at::Tensor> _foreach_lerp_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {

  static auto op = create__foreach_lerp_List_typed_handle();
  return op.redispatch(dispatchKeySet, self, tensors1, weights);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp__List, name, "aten::_foreach_lerp_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp__List, schema_str, "_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()")

// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp__List::schema> create__foreach_lerp__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp__List::name, _foreach_lerp__List::overload_name)
      .typed<_foreach_lerp__List::schema>();
}

// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
void _foreach_lerp__List::call(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {

  static auto op = create__foreach_lerp__List_typed_handle();
  return op.call(self, tensors1, weights);
}

// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
void _foreach_lerp__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {

  static auto op = create__foreach_lerp__List_typed_handle();
  return op.redispatch(dispatchKeySet, self, tensors1, weights);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_Scalar, name, "aten::_foreach_lerp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_Scalar, schema_str, "_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]")

// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_Scalar::schema> create__foreach_lerp_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_Scalar::name, _foreach_lerp_Scalar::overload_name)
      .typed<_foreach_lerp_Scalar::schema>();
}

// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
::std::vector<at::Tensor> _foreach_lerp_Scalar::call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {

  static auto op = create__foreach_lerp_Scalar_typed_handle();
  return op.call(self, tensors1, weight);
}

// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
::std::vector<at::Tensor> _foreach_lerp_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {

  static auto op = create__foreach_lerp_Scalar_typed_handle();
  return op.redispatch(dispatchKeySet, self, tensors1, weight);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp__Scalar, name, "aten::_foreach_lerp_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp__Scalar, schema_str, "_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()")

// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp__Scalar::schema> create__foreach_lerp__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp__Scalar::name, _foreach_lerp__Scalar::overload_name)
      .typed<_foreach_lerp__Scalar::schema>();
}

// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
void _foreach_lerp__Scalar::call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {

  static auto op = create__foreach_lerp__Scalar_typed_handle();
  return op.call(self, tensors1, weight);
}

// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
void _foreach_lerp__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {

  static auto op = create__foreach_lerp__Scalar_typed_handle();
  return op.redispatch(dispatchKeySet, self, tensors1, weight);
}
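
// Example (editor's sketch): _foreach_lerp interpolates self[i] toward
// tensors1[i], i.e. result[i] = self[i] + weight * (tensors1[i] - self[i]).
// The .List overload takes one weight tensor per entry; the .Scalar overload
// shares a single weight. Assuming the usual generated at:: wrappers, with
// hypothetical inputs:
//
//   std::vector<at::Tensor> a = {at::zeros({3})};
//   std::vector<at::Tensor> b = {at::ones({3})};
//   auto mid = at::_foreach_lerp(a, b, 0.5);  // Scalar overload: ~[0.5, 0.5, 0.5]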

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_backward_grad_input, name, "aten::mse_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_backward_grad_input, schema_str, "mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mse_loss_backward_grad_input::schema> create_mse_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mse_loss_backward_grad_input::name, mse_loss_backward_grad_input::overload_name)
      .typed<mse_loss_backward_grad_input::schema>();
}

// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & mse_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {

  static auto op = create_mse_loss_backward_grad_input_typed_handle();
  return op.call(grad_output, self, target, reduction, grad_input);
}

// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & mse_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {

  static auto op = create_mse_loss_backward_grad_input_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_backward, name, "aten::mse_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_backward, schema_str, "mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor")

// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mse_loss_backward::schema> create_mse_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mse_loss_backward::name, mse_loss_backward::overload_name)
      .typed<mse_loss_backward::schema>();
}

// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
at::Tensor mse_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {

  static auto op = create_mse_loss_backward_typed_handle();
  return op.call(grad_output, self, target, reduction);
}

// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
at::Tensor mse_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {

  static auto op = create_mse_loss_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, target, reduction);
}
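
// Editor's note (sketch of the math, assuming the standard mean-squared-error
// definition l(x, t) = (x - t)^2 per element): the backward computed by the
// kernels behind this wrapper is
//   dl/dx = 2 * (x - t) * grad_output,
// further divided by the element count when `reduction` is Mean; `reduction`
// only rescales, it does not change the shape of grad_input.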

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multi_margin_loss_backward_grad_input, name, "aten::multi_margin_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multi_margin_loss_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multi_margin_loss_backward_grad_input, schema_str, "multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multi_margin_loss_backward_grad_input::schema> create_multi_margin_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multi_margin_loss_backward_grad_input::name, multi_margin_loss_backward_grad_input::overload_name)
      .typed<multi_margin_loss_backward_grad_input::schema>();
}

// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & multi_margin_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {

  static auto op = create_multi_margin_loss_backward_grad_input_typed_handle();
  return op.call(grad_output, self, target, p, margin, weight, reduction, grad_input);
}

// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & multi_margin_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {

  static auto op = create_multi_margin_loss_backward_grad_input_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multi_margin_loss_backward, name, "aten::multi_margin_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multi_margin_loss_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multi_margin_loss_backward, schema_str, "multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor")

// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<multi_margin_loss_backward::schema> create_multi_margin_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multi_margin_loss_backward::name, multi_margin_loss_backward::overload_name)
      .typed<multi_margin_loss_backward::schema>();
}

// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor multi_margin_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {

  static auto op = create_multi_margin_loss_backward_typed_handle();
  return op.call(grad_output, self, target, p, margin, weight, reduction);
}

// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor multi_margin_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {

  static auto op = create_multi_margin_loss_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multilabel_margin_loss_backward_grad_input, name, "aten::multilabel_margin_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multilabel_margin_loss_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multilabel_margin_loss_backward_grad_input, schema_str, "multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_backward_grad_input::schema> create_multilabel_margin_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multilabel_margin_loss_backward_grad_input::name, multilabel_margin_loss_backward_grad_input::overload_name)
      .typed<multilabel_margin_loss_backward_grad_input::schema>();
}

// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & multilabel_margin_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {

  static auto op = create_multilabel_margin_loss_backward_grad_input_typed_handle();
  return op.call(grad_output, self, target, reduction, is_target, grad_input);
}

// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & multilabel_margin_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {

  static auto op = create_multilabel_margin_loss_backward_grad_input_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multilabel_margin_loss_backward, name, "aten::multilabel_margin_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multilabel_margin_loss_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(multilabel_margin_loss_backward, schema_str, "multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor")

// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_backward::schema> create_multilabel_margin_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multilabel_margin_loss_backward::name, multilabel_margin_loss_backward::overload_name)
      .typed<multilabel_margin_loss_backward::schema>();
}

// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
at::Tensor multilabel_margin_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {

  static auto op = create_multilabel_margin_loss_backward_typed_handle();
  return op.call(grad_output, self, target, reduction, is_target);
}

// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
at::Tensor multilabel_margin_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {

  static auto op = create_multilabel_margin_loss_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_backward_grad_input, name, "aten::elu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_backward_grad_input, schema_str, "elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<elu_backward_grad_input::schema> create_elu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu_backward_grad_input::name, elu_backward_grad_input::overload_name)
      .typed<elu_backward_grad_input::schema>();
}

// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & elu_backward_grad_input::call(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {

  static auto op = create_elu_backward_grad_input_typed_handle();
  return op.call(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
}

// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & elu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {

  static auto op = create_elu_backward_grad_input_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_backward, name, "aten::elu_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(elu_backward, schema_str, "elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor")

// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<elu_backward::schema> create_elu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu_backward::name, elu_backward::overload_name)
      .typed<elu_backward::schema>();
}

// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
at::Tensor elu_backward::call(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {

  static auto op = create_elu_backward_typed_handle();
  return op.call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
}

// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
at::Tensor elu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {

  static auto op = create_elu_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result);
}
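
// Editor's note (sketch, assuming the standard scaled ELU definition
// f(x) = scale * (x > 0 ? x : alpha * (exp(input_scale * x) - 1))):
// the backward is grad_output * scale on the positive branch and
// grad_output * scale * alpha * input_scale * exp(input_scale * x) on the
// negative one. The is_result flag and self_or_result argument exist so the
// kernel can recover that negative branch either from the saved input or,
// more cheaply, from the saved forward output.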

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_backward_grad_input, name, "aten::hardsigmoid_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_backward_grad_input, schema_str, "hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid_backward_grad_input::schema> create_hardsigmoid_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardsigmoid_backward_grad_input::name, hardsigmoid_backward_grad_input::overload_name)
      .typed<hardsigmoid_backward_grad_input::schema>();
}

// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & hardsigmoid_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {

  static auto op = create_hardsigmoid_backward_grad_input_typed_handle();
  return op.call(grad_output, self, grad_input);
}

// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & hardsigmoid_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {

  static auto op = create_hardsigmoid_backward_grad_input_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_backward, name, "aten::hardsigmoid_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_backward, schema_str, "hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor")

// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid_backward::schema> create_hardsigmoid_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardsigmoid_backward::name, hardsigmoid_backward::overload_name)
      .typed<hardsigmoid_backward::schema>();
}

// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor hardsigmoid_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {

  static auto op = create_hardsigmoid_backward_typed_handle();
  return op.call(grad_output, self);
}

// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor hardsigmoid_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {

  static auto op = create_hardsigmoid_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_with_noise_backward, name, "aten::rrelu_with_noise_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_with_noise_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_with_noise_backward, schema_str, "rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor")

// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_backward::schema> create_rrelu_with_noise_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_with_noise_backward::name, rrelu_with_noise_backward::overload_name)
      .typed<rrelu_with_noise_backward::schema>();
}

// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
at::Tensor rrelu_with_noise_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {

  static auto op = create_rrelu_with_noise_backward_typed_handle();
  return op.call(grad_output, self, noise, lower, upper, training, self_is_result);
}

// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
at::Tensor rrelu_with_noise_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {

  static auto op = create_rrelu_with_noise_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_backward_grad_input, name, "aten::softplus_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_backward_grad_input, schema_str, "softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<softplus_backward_grad_input::schema> create_softplus_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softplus_backward_grad_input::name, softplus_backward_grad_input::overload_name)
      .typed<softplus_backward_grad_input::schema>();
}

// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & softplus_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {

  static auto op = create_softplus_backward_grad_input_typed_handle();
  return op.call(grad_output, self, beta, threshold, grad_input);
}

// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & softplus_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {

  static auto op = create_softplus_backward_grad_input_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, beta, threshold, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_backward, name, "aten::softplus_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softplus_backward, schema_str, "softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor")

// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<softplus_backward::schema> create_softplus_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softplus_backward::name, softplus_backward::overload_name)
      .typed<softplus_backward::schema>();
}

// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
at::Tensor softplus_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {

  static auto op = create_softplus_backward_typed_handle();
  return op.call(grad_output, self, beta, threshold);
}

// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
at::Tensor softplus_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {

  static auto op = create_softplus_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, beta, threshold);
}
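
// Editor's note (sketch of the math, assuming the standard definition
// softplus(x) = (1/beta) * log(1 + exp(beta * x))): the backward here is
//   dl/dx = grad_output * sigmoid(beta * x),
// and above `threshold` (where beta * x is large and softplus is numerically
// linear) the gradient is passed through unchanged.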

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_backward, name, "aten::mkldnn_adaptive_avg_pool2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_backward, schema_str, "mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor")

// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_adaptive_avg_pool2d_backward::schema> create_mkldnn_adaptive_avg_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_adaptive_avg_pool2d_backward::name, mkldnn_adaptive_avg_pool2d_backward::overload_name)
      .typed<mkldnn_adaptive_avg_pool2d_backward::schema>();
}

// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor mkldnn_adaptive_avg_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {

  static auto op = create_mkldnn_adaptive_avg_pool2d_backward_typed_handle();
  return op.call(grad_output, self);
}

// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor mkldnn_adaptive_avg_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {

  static auto op = create_mkldnn_adaptive_avg_pool2d_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_backward_grad_input, name, "aten::fractional_max_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_backward_grad_input, schema_str, "fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool3d_backward_grad_input::schema> create_fractional_max_pool3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool3d_backward_grad_input::name, fractional_max_pool3d_backward_grad_input::overload_name)
      .typed<fractional_max_pool3d_backward_grad_input::schema>();
}

// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & fractional_max_pool3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {

  static auto op = create_fractional_max_pool3d_backward_grad_input_typed_handle();
  return op.call(grad_output, self, kernel_size, output_size, indices, grad_input);
}

// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & fractional_max_pool3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {

  static auto op = create_fractional_max_pool3d_backward_grad_input_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_backward, name, "aten::fractional_max_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_backward, schema_str, "fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor")

// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool3d_backward::schema> create_fractional_max_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool3d_backward::name, fractional_max_pool3d_backward::overload_name)
      .typed<fractional_max_pool3d_backward::schema>();
}

// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
at::Tensor fractional_max_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {

  static auto op = create_fractional_max_pool3d_backward_typed_handle();
  return op.call(grad_output, self, kernel_size, output_size, indices);
}

// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
at::Tensor fractional_max_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {

  static auto op = create_fractional_max_pool3d_backward_typed_handle();
  return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_out, name, "aten::max_pool2d_with_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices_out, schema_str, "max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")

// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_with_indices_out::schema> create_max_pool2d_with_indices_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_with_indices_out::name, max_pool2d_with_indices_out::overload_name)
      .typed<max_pool2d_with_indices_out::schema>();
}

// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {

  static auto op = create_max_pool2d_with_indices_out_typed_handle();
  return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}

// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {

  static auto op = create_max_pool2d_with_indices_out_typed_handle();
  return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
11674
11675STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices, name, "aten::max_pool2d_with_indices")
11676STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices, overload_name, "")
11677STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d_with_indices, schema_str, "max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)")
11678
11679// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
11680static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_with_indices::schema> create_max_pool2d_with_indices_typed_handle() {
11681 return c10::Dispatcher::singleton()
11682 .findSchemaOrThrow(max_pool2d_with_indices::name, max_pool2d_with_indices::overload_name)
11683 .typed<max_pool2d_with_indices::schema>();
11684}
11685
11686// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
11687::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
11688
11689 static auto op = create_max_pool2d_with_indices_typed_handle();
11690 return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
11691}
11692
11693// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
11694::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
11695
11696 static auto op = create_max_pool2d_with_indices_typed_handle();
11697 return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
11698}
11699
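// NOTE: a hedged usage sketch of the functional variant above; shapes are illustrative.
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   auto [pooled, idx] = at::max_pool2d_with_indices(x, /*kernel_size=*/{2, 2});
//   // `pooled` holds the max values; `idx` holds the flat argmax locations that the
//   // corresponding backward op consumes.
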
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad1d_out, name, "aten::reflection_pad1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad1d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad1d_out, schema_str, "reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)")

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad1d_out::schema> create_reflection_pad1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad1d_out::name, reflection_pad1d_out::overload_name)
      .typed<reflection_pad1d_out::schema>();
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & reflection_pad1d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {

    static auto op = create_reflection_pad1d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & reflection_pad1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {

    static auto op = create_reflection_pad1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad1d, name, "aten::reflection_pad1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad1d, schema_str, "reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor")

// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad1d::schema> create_reflection_pad1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad1d::name, reflection_pad1d::overload_name)
      .typed<reflection_pad1d::schema>();
}

// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
at::Tensor reflection_pad1d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {

    static auto op = create_reflection_pad1d_typed_handle();
    return op.call(self, padding);
}

// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
at::Tensor reflection_pad1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {

    static auto op = create_reflection_pad1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

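// NOTE: a hedged usage sketch; padding is given as {left, right} on the last dimension.
//   at::Tensor x = at::arange(4, at::kFloat).reshape({1, 1, 4});  // [[[0, 1, 2, 3]]]
//   at::Tensor y = at::reflection_pad1d(x, /*padding=*/{2, 1});   // [[[2, 1, 0, 1, 2, 3, 2]]]
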
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_enum, name, "aten::_pad_enum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_enum, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_enum, schema_str, "_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor")

// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pad_enum::schema> create__pad_enum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pad_enum::name, _pad_enum::overload_name)
      .typed<_pad_enum::schema>();
}

// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
at::Tensor _pad_enum::call(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {

    static auto op = create__pad_enum_typed_handle();
    return op.call(self, pad, mode, value);
}

// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
at::Tensor _pad_enum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {

    static auto op = create__pad_enum_typed_handle();
    return op.redispatch(dispatchKeySet, self, pad, mode, value);
}

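// NOTE (assumption): _pad_enum is the internal target of the string-mode aten::pad /
// torch.nn.functional.pad path; `mode` is an integer encoding of the non-constant
// padding modes (reflect/replicate/circular), with the concrete mapping defined by
// the native padding kernels. Prefer the public at::pad API over calling this directly.
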
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_vec, name, "aten::upsample_trilinear3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_vec, overload_name, "vec")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_vec, schema_str, "upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")

// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d_vec::schema> create_upsample_trilinear3d_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_trilinear3d_vec::name, upsample_trilinear3d_vec::overload_name)
      .typed<upsample_trilinear3d_vec::schema>();
}

// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_trilinear3d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create_upsample_trilinear3d_vec_typed_handle();
    return op.call(input, output_size, align_corners, scale_factors);
}

// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_trilinear3d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create_upsample_trilinear3d_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

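// NOTE (assumption): for the .vec upsample overloads here and below, callers are
// expected to provide exactly one of `output_size` and `scale_factors` (leaving the
// other nullopt); the eager kernels check this invariant before deriving the output
// shape.
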
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa_vec, name, "aten::_upsample_bicubic2d_aa")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa_vec, overload_name, "vec")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa_vec, schema_str, "_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")

// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa_vec::schema> create__upsample_bicubic2d_aa_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bicubic2d_aa_vec::name, _upsample_bicubic2d_aa_vec::overload_name)
      .typed<_upsample_bicubic2d_aa_vec::schema>();
}

// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor _upsample_bicubic2d_aa_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create__upsample_bicubic2d_aa_vec_typed_handle();
    return op.call(input, output_size, align_corners, scale_factors);
}

// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor _upsample_bicubic2d_aa_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create__upsample_bicubic2d_aa_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_vec, name, "aten::upsample_nearest3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_vec, overload_name, "vec")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_vec, schema_str, "upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")

// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d_vec::schema> create_upsample_nearest3d_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest3d_vec::name, upsample_nearest3d_vec::overload_name)
      .typed<upsample_nearest3d_vec::schema>();
}

// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest3d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create_upsample_nearest3d_vec_typed_handle();
    return op.call(input, output_size, scale_factors);
}

// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest3d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create_upsample_nearest3d_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_backward_grad_input, name, "aten::_upsample_bilinear2d_aa_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_backward_grad_input, schema_str, "_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa_backward_grad_input::schema> create__upsample_bilinear2d_aa_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bilinear2d_aa_backward_grad_input::name, _upsample_bilinear2d_aa_backward_grad_input::overload_name)
      .typed<_upsample_bilinear2d_aa_backward_grad_input::schema>();
}

// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_bilinear2d_aa_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {

    static auto op = create__upsample_bilinear2d_aa_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_bilinear2d_aa_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {

    static auto op = create__upsample_bilinear2d_aa_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_backward, name, "aten::_upsample_bilinear2d_aa_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bilinear2d_aa_backward, schema_str, "_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")

// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa_backward::schema> create__upsample_bilinear2d_aa_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bilinear2d_aa_backward::name, _upsample_bilinear2d_aa_backward::overload_name)
      .typed<_upsample_bilinear2d_aa_backward::schema>();
}

// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bilinear2d_aa_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create__upsample_bilinear2d_aa_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bilinear2d_aa_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create__upsample_bilinear2d_aa_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

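// NOTE: in the backward schemas above, `output_size` is the forward target size and
// `input_size` is the full NCHW shape of the forward input; the latter is required
// because grad_input's shape cannot be recovered from grad_output alone.
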
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa_out, name, "aten::_upsample_bicubic2d_aa")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa_out, schema_str, "_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa_out::schema> create__upsample_bicubic2d_aa_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bicubic2d_aa_out::name, _upsample_bicubic2d_aa_out::overload_name)
      .typed<_upsample_bicubic2d_aa_out::schema>();
}

// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_bicubic2d_aa_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {

    static auto op = create__upsample_bicubic2d_aa_out_typed_handle();
    return op.call(self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_bicubic2d_aa_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {

    static auto op = create__upsample_bicubic2d_aa_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa, name, "aten::_upsample_bicubic2d_aa")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_bicubic2d_aa, schema_str, "_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")

// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa::schema> create__upsample_bicubic2d_aa_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bicubic2d_aa::name, _upsample_bicubic2d_aa::overload_name)
      .typed<_upsample_bicubic2d_aa::schema>();
}

// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bicubic2d_aa::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create__upsample_bicubic2d_aa_typed_handle();
    return op.call(self, output_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bicubic2d_aa::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create__upsample_bicubic2d_aa_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_out, name, "aten::upsample_trilinear3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_out, schema_str, "upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d_out::schema> create_upsample_trilinear3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_trilinear3d_out::name, upsample_trilinear3d_out::overload_name)
      .typed<upsample_trilinear3d_out::schema>();
}

// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_trilinear3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {

    static auto op = create_upsample_trilinear3d_out_typed_handle();
    return op.call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
}

// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_trilinear3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {

    static auto op = create_upsample_trilinear3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d, name, "aten::upsample_trilinear3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d, schema_str, "upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")

// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d::schema> create_upsample_trilinear3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_trilinear3d::name, upsample_trilinear3d::overload_name)
      .typed<upsample_trilinear3d::schema>();
}

// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_trilinear3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create_upsample_trilinear3d_typed_handle();
    return op.call(self, output_size, align_corners, scales_d, scales_h, scales_w);
}

// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_trilinear3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create_upsample_trilinear3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w);
}

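// NOTE: a hedged usage sketch of the functional variant; input is NCDHW.
//   at::Tensor v  = at::randn({1, 1, 2, 4, 4});
//   at::Tensor up = at::upsample_trilinear3d(v, /*output_size=*/{4, 8, 8},
//                                            /*align_corners=*/false);
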
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_out, name, "aten::upsample_nearest3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_out, schema_str, "upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d_out::schema> create_upsample_nearest3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest3d_out::name, upsample_nearest3d_out::overload_name)
      .typed<upsample_nearest3d_out::schema>();
}

// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_nearest3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {

    static auto op = create_upsample_nearest3d_out_typed_handle();
    return op.call(self, output_size, scales_d, scales_h, scales_w, out);
}

// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_nearest3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {

    static auto op = create_upsample_nearest3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d, name, "aten::upsample_nearest3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d, schema_str, "upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")

// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d::schema> create_upsample_nearest3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest3d::name, upsample_nearest3d::overload_name)
      .typed<upsample_nearest3d::schema>();
}

// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_nearest3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create_upsample_nearest3d_typed_handle();
    return op.call(self, output_size, scales_d, scales_h, scales_w);
}

// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_nearest3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {

    static auto op = create_upsample_nearest3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w);
}

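// NOTE: a hedged usage sketch; the optional scales_* default to nullopt, in which
// case the per-axis scale is inferred from the input and output sizes.
//   at::Tensor v  = at::randn({1, 1, 2, 4, 4});
//   at::Tensor up = at::upsample_nearest3d(v, /*output_size=*/{4, 8, 8});
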
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_backward_grad_input, name, "aten::sigmoid_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_backward_grad_input, schema_str, "sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid_backward_grad_input::schema> create_sigmoid_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid_backward_grad_input::name, sigmoid_backward_grad_input::overload_name)
      .typed<sigmoid_backward_grad_input::schema>();
}

// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & sigmoid_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {

    static auto op = create_sigmoid_backward_grad_input_typed_handle();
    return op.call(grad_output, output, grad_input);
}

// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & sigmoid_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {

    static auto op = create_sigmoid_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_backward, name, "aten::sigmoid_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_backward, schema_str, "sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor")

// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid_backward::schema> create_sigmoid_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid_backward::name, sigmoid_backward::overload_name)
      .typed<sigmoid_backward::schema>();
}

// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
at::Tensor sigmoid_backward::call(const at::Tensor & grad_output, const at::Tensor & output) {

    static auto op = create_sigmoid_backward_typed_handle();
    return op.call(grad_output, output);
}

// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
at::Tensor sigmoid_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output) {

    static auto op = create_sigmoid_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output);
}

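// NOTE: mathematically, with y = sigmoid(x), sigmoid_backward computes
//   grad_input = grad_output * y * (1 - y),
// taking the saved forward *output* rather than the input. A hedged sketch
// (`x` and `grad_out` are placeholders):
//   at::Tensor y  = at::sigmoid(x);
//   at::Tensor gi = at::sigmoid_backward(grad_out, y);
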
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_backward_grad_input, name, "aten::tanh_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_backward_grad_input, schema_str, "tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tanh_backward_grad_input::schema> create_tanh_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh_backward_grad_input::name, tanh_backward_grad_input::overload_name)
      .typed<tanh_backward_grad_input::schema>();
}

// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & tanh_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {

    static auto op = create_tanh_backward_grad_input_typed_handle();
    return op.call(grad_output, output, grad_input);
}

// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & tanh_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {

    static auto op = create_tanh_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_backward, name, "aten::tanh_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_backward, schema_str, "tanh_backward(Tensor grad_output, Tensor output) -> Tensor")

// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tanh_backward::schema> create_tanh_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh_backward::name, tanh_backward::overload_name)
      .typed<tanh_backward::schema>();
}

// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
at::Tensor tanh_backward::call(const at::Tensor & grad_output, const at::Tensor & output) {

    static auto op = create_tanh_backward_typed_handle();
    return op.call(grad_output, output);
}

// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
at::Tensor tanh_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output) {

    static auto op = create_tanh_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output);
}

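// NOTE: with y = tanh(x), tanh_backward computes grad_input = grad_output * (1 - y * y),
// again in terms of the saved forward output (`x`, `grad_out` are placeholders):
//   at::Tensor y  = at::tanh(x);
//   at::Tensor gi = at::tanh_backward(grad_out, y);
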
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(thnn_conv2d_out, name, "aten::thnn_conv2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(thnn_conv2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(thnn_conv2d_out, schema_str, "thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<thnn_conv2d_out::schema> create_thnn_conv2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(thnn_conv2d_out::name, thnn_conv2d_out::overload_name)
      .typed<thnn_conv2d_out::schema>();
}

// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & thnn_conv2d_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {

    static auto op = create_thnn_conv2d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, out);
}

// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & thnn_conv2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {

    static auto op = create_thnn_conv2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(thnn_conv2d, name, "aten::thnn_conv2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(thnn_conv2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(thnn_conv2d, schema_str, "thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor")

// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<thnn_conv2d::schema> create_thnn_conv2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(thnn_conv2d::name, thnn_conv2d::overload_name)
      .typed<thnn_conv2d::schema>();
}

// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
at::Tensor thnn_conv2d::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {

    static auto op = create_thnn_conv2d_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding);
}

// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
at::Tensor thnn_conv2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {

    static auto op = create_thnn_conv2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
}

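// NOTE: a hedged usage sketch; bias/stride/padding take their schema defaults.
//   at::Tensor input  = at::randn({1, 3, 8, 8});
//   at::Tensor weight = at::randn({4, 3, 3, 3});   // out_channels, in_channels, kH, kW
//   at::Tensor out    = at::thnn_conv2d(input, weight, /*kernel_size=*/{3, 3});
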
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_forward_output, name, "aten::_slow_conv2d_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_forward_output, overload_name, "output")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_forward_output, schema_str, "_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)")

// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_forward_output::schema> create__slow_conv2d_forward_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_slow_conv2d_forward_output::name, _slow_conv2d_forward_output::overload_name)
      .typed<_slow_conv2d_forward_output::schema>();
}

// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
at::Tensor & _slow_conv2d_forward_output::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {

    static auto op = create__slow_conv2d_forward_output_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, output);
}

// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
at::Tensor & _slow_conv2d_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {

    static auto op = create__slow_conv2d_forward_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_forward, name, "aten::_slow_conv2d_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_forward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_forward, schema_str, "_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor")

// aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_forward::schema> create__slow_conv2d_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_slow_conv2d_forward::name, _slow_conv2d_forward::overload_name)
      .typed<_slow_conv2d_forward::schema>();
}

// aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor
at::Tensor _slow_conv2d_forward::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {

    static auto op = create__slow_conv2d_forward_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding);
}

// aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor
at::Tensor _slow_conv2d_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {

    static auto op = create__slow_conv2d_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
}

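// NOTE (assumption): thnn_conv2d above is a thin wrapper over this _slow_conv2d_forward
// op, the reference (im2col/GEMM-style) convolution path used when no faster backend
// kernel applies; unlike thnn_conv2d, its bias/stride/padding arguments have no defaults.
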
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(column_stack, name, "aten::column_stack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(column_stack, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(column_stack, schema_str, "column_stack(Tensor[] tensors) -> Tensor")

// aten::column_stack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<column_stack::schema> create_column_stack_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(column_stack::name, column_stack::overload_name)
      .typed<column_stack::schema>();
}

// aten::column_stack(Tensor[] tensors) -> Tensor
at::Tensor column_stack::call(at::TensorList tensors) {

    static auto op = create_column_stack_typed_handle();
    return op.call(tensors);
}

// aten::column_stack(Tensor[] tensors) -> Tensor
at::Tensor column_stack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {

    static auto op = create_column_stack_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(column_stack_out, name, "aten::column_stack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(column_stack_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(column_stack_out, schema_str, "column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)")

// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<column_stack_out::schema> create_column_stack_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(column_stack_out::name, column_stack_out::overload_name)
      .typed<column_stack_out::schema>();
}

// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & column_stack_out::call(at::TensorList tensors, at::Tensor & out) {

    static auto op = create_column_stack_out_typed_handle();
    return op.call(tensors, out);
}

// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & column_stack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {

    static auto op = create_column_stack_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

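// NOTE: a hedged usage sketch; 1-D inputs are treated as columns before concatenation.
//   at::Tensor a = at::arange(3, at::kFloat);       // [0, 1, 2]
//   at::Tensor b = at::arange(3, 6, at::kFloat);    // [3, 4, 5]
//   at::Tensor m = at::column_stack({a, b});        // shape [3, 2]
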
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_entr, name, "aten::special_entr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_entr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_entr, schema_str, "special_entr(Tensor self) -> Tensor")

// aten::special_entr(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_entr::schema> create_special_entr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_entr::name, special_entr::overload_name)
      .typed<special_entr::schema>();
}

// aten::special_entr(Tensor self) -> Tensor
at::Tensor special_entr::call(const at::Tensor & self) {

    static auto op = create_special_entr_typed_handle();
    return op.call(self);
}

// aten::special_entr(Tensor self) -> Tensor
at::Tensor special_entr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_entr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_entr_out, name, "aten::special_entr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_entr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_entr_out, schema_str, "special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_entr_out::schema> create_special_entr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_entr_out::name, special_entr_out::overload_name)
      .typed<special_entr_out::schema>();
}

// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_entr_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_entr_out_typed_handle();
    return op.call(self, out);
}

// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_entr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_entr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

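// NOTE: elementwise entropy term, matching scipy.special.entr:
//   entr(x) = -x * ln(x) for x > 0;  0 at x == 0;  -inf for x < 0.
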
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_ndtri, name, "aten::special_ndtri")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_ndtri, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_ndtri, schema_str, "special_ndtri(Tensor self) -> Tensor")

// aten::special_ndtri(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_ndtri::schema> create_special_ndtri_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_ndtri::name, special_ndtri::overload_name)
      .typed<special_ndtri::schema>();
}

// aten::special_ndtri(Tensor self) -> Tensor
at::Tensor special_ndtri::call(const at::Tensor & self) {

    static auto op = create_special_ndtri_typed_handle();
    return op.call(self);
}

// aten::special_ndtri(Tensor self) -> Tensor
at::Tensor special_ndtri::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_ndtri_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_ndtri_out, name, "aten::special_ndtri")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_ndtri_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_ndtri_out, schema_str, "special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_ndtri_out::schema> create_special_ndtri_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_ndtri_out::name, special_ndtri_out::overload_name)
      .typed<special_ndtri_out::schema>();
}

// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_ndtri_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_ndtri_out_typed_handle();
    return op.call(self, out);
}

// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_ndtri_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_ndtri_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

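// NOTE: ndtri is the quantile function (inverse CDF) of the standard normal
// distribution, i.e. the inverse of special_ndtr; ndtri(0.5) == 0, and the
// endpoints 0 and 1 map to -inf and +inf respectively.
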
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfc, name, "aten::special_erfc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfc, schema_str, "special_erfc(Tensor self) -> Tensor")

// aten::special_erfc(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_erfc::schema> create_special_erfc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfc::name, special_erfc::overload_name)
      .typed<special_erfc::schema>();
}

// aten::special_erfc(Tensor self) -> Tensor
at::Tensor special_erfc::call(const at::Tensor & self) {

    static auto op = create_special_erfc_typed_handle();
    return op.call(self);
}

// aten::special_erfc(Tensor self) -> Tensor
at::Tensor special_erfc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_erfc_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfc_out, name, "aten::special_erfc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfc_out, schema_str, "special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_erfc_out::schema> create_special_erfc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfc_out::name, special_erfc_out::overload_name)
      .typed<special_erfc_out::schema>();
}

// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfc_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_erfc_out_typed_handle();
    return op.call(self, out);
}

// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_erfc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

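// NOTE: complementary error function, erfc(x) = 1 - erf(x); using it directly avoids
// the cancellation that computing 1 - erf(x) suffers for large positive x.
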
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1e, name, "aten::special_i1e")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1e, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1e, schema_str, "special_i1e(Tensor self) -> Tensor")

// aten::special_i1e(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_i1e::schema> create_special_i1e_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_i1e::name, special_i1e::overload_name)
      .typed<special_i1e::schema>();
}

// aten::special_i1e(Tensor self) -> Tensor
at::Tensor special_i1e::call(const at::Tensor & self) {

    static auto op = create_special_i1e_typed_handle();
    return op.call(self);
}

// aten::special_i1e(Tensor self) -> Tensor
at::Tensor special_i1e::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_i1e_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1e_out, name, "aten::special_i1e")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1e_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i1e_out, schema_str, "special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_i1e_out::schema> create_special_i1e_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_i1e_out::name, special_i1e_out::overload_name)
      .typed<special_i1e_out::schema>();
}

// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i1e_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_i1e_out_typed_handle();
    return op.call(self, out);
}

// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i1e_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_i1e_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

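// NOTE: exponentially scaled modified Bessel function of the first kind, order 1:
//   i1e(x) = exp(-|x|) * i1(x),
// which remains finite for large |x| where i1(x) itself would overflow.
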
12500STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logsumexp, name, "aten::special_logsumexp")
12501STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logsumexp, overload_name, "")
12502STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logsumexp, schema_str, "special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor")
12503
12504// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
12505static C10_NOINLINE c10::TypedOperatorHandle<special_logsumexp::schema> create_special_logsumexp_typed_handle() {
12506 return c10::Dispatcher::singleton()
12507 .findSchemaOrThrow(special_logsumexp::name, special_logsumexp::overload_name)
12508 .typed<special_logsumexp::schema>();
12509}
12510
12511// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
12512at::Tensor special_logsumexp::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
12513
12514 static auto op = create_special_logsumexp_typed_handle();
12515 return op.call(self, dim, keepdim);
12516}
12517
12518// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
12519at::Tensor special_logsumexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
12520
12521 static auto op = create_special_logsumexp_typed_handle();
12522 return op.redispatch(dispatchKeySet, self, dim, keepdim);
12523}
12524
12525STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logsumexp_out, name, "aten::special_logsumexp")
12526STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logsumexp_out, overload_name, "out")
12527STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_logsumexp_out, schema_str, "special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
12528
12529// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
12530static C10_NOINLINE c10::TypedOperatorHandle<special_logsumexp_out::schema> create_special_logsumexp_out_typed_handle() {
12531 return c10::Dispatcher::singleton()
12532 .findSchemaOrThrow(special_logsumexp_out::name, special_logsumexp_out::overload_name)
12533 .typed<special_logsumexp_out::schema>();
12534}
12535
12536// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
12537at::Tensor & special_logsumexp_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
12538
12539 static auto op = create_special_logsumexp_out_typed_handle();
12540 return op.call(self, dim, keepdim, out);
12541}
12542
12543// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
12544at::Tensor & special_logsumexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
12545
12546 static auto op = create_special_logsumexp_out_typed_handle();
12547 return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
12548}
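
// Usage sketch for the special_logsumexp pair above (illustrative; assumes a
// linked ATen build):
//
//   at::Tensor t = at::randn({2, 3});
//   at::Tensor r = at::special_logsumexp(t, /*dim=*/{1});
//   // r[i] == log(sum_j exp(t[i][j])), evaluated in a numerically stable way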

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammainc_out, name, "aten::special_gammainc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammainc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammainc_out, schema_str, "special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_gammainc_out::schema> create_special_gammainc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammainc_out::name, special_gammainc_out::overload_name)
      .typed<special_gammainc_out::schema>();
}

// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammainc_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_gammainc_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammainc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_gammainc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammainc, name, "aten::special_gammainc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammainc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammainc, schema_str, "special_gammainc(Tensor self, Tensor other) -> Tensor")

// aten::special_gammainc(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_gammainc::schema> create_special_gammainc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammainc::name, special_gammainc::overload_name)
      .typed<special_gammainc::schema>();
}

// aten::special_gammainc(Tensor self, Tensor other) -> Tensor
at::Tensor special_gammainc::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_special_gammainc_typed_handle();
    return op.call(self, other);
}

// aten::special_gammainc(Tensor self, Tensor other) -> Tensor
at::Tensor special_gammainc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_special_gammainc_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}
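
// special_gammainc is the regularized lower incomplete gamma function
// P(self, other), the op behind torch.special.gammainc. Sketch (illustrative):
//
//   at::Tensor a = at::rand({3}) + 0.5;          // shape parameters > 0
//   at::Tensor x = at::rand({3});
//   at::Tensor p = at::special_gammainc(a, x);   // values in [0, 1]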

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft2, name, "aten::fft_rfft2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft2, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft2, schema_str, "fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor")

// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfft2::schema> create_fft_rfft2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfft2::name, fft_rfft2::overload_name)
      .typed<fft_rfft2::schema>();
}

// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_rfft2::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_rfft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_rfft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_rfft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft2_out, name, "aten::fft_rfft2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft2_out, schema_str, "fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfft2_out::schema> create_fft_rfft2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfft2_out::name, fft_rfft2_out::overload_name)
      .typed<fft_rfft2_out::schema>();
}

// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfft2_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_rfft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_rfft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfftn, name, "aten::fft_hfftn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfftn, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfftn, schema_str, "fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor")

// aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfftn::schema> create_fft_hfftn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_hfftn::name, fft_hfftn::overload_name)
      .typed<fft_hfftn::schema>();
}

// aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_hfftn::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_hfftn_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_hfftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_hfftn_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfftn_out, name, "aten::fft_hfftn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfftn_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfftn_out, schema_str, "fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfftn_out::schema> create_fft_hfftn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_hfftn_out::name, fft_hfftn_out::overload_name)
      .typed<fft_hfftn_out::schema>();
}

// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & fft_hfftn_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {

    static auto op = create_fft_hfftn_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & fft_hfftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {

    static auto op = create_fft_hfftn_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}
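
// Note the signature asymmetry above: fft_rfft2_out takes `at::Tensor & out`
// while fft_hfftn_out takes and returns `const at::Tensor & out`. Tensor
// constness is shallow (the underlying storage can still be written through a
// const reference), so both out tensors are mutated, as the schemas'
// Tensor(a!) annotations indicate. Sketch of the functional variants
// (illustrative):
//
//   at::Tensor img = at::randn({8, 8});
//   at::Tensor F = at::fft_rfft2(img);   // one-sided: shape {8, 8/2 + 1} = {8, 5}
//   at::Tensor H = at::fft_hfftn(img);   // FFT of a Hermitian-symmetric signal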

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu, name, "aten::linalg_lu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu, schema_str, "linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)")

// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu::schema> create_linalg_lu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu::name, linalg_lu::overload_name)
      .typed<linalg_lu::schema>();
}

// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu::call(const at::Tensor & A, bool pivot) {

    static auto op = create_linalg_lu_typed_handle();
    return op.call(A, pivot);
}

// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot) {

    static auto op = create_linalg_lu_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot);
}
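
// Sketch (illustrative; C++17 structured bindings over the returned tuple):
//
//   at::Tensor A = at::randn({3, 3});
//   auto [P, L, U] = at::linalg_lu(A, /*pivot=*/true);
//   // P.matmul(L).matmul(U) reconstructs A up to floating-point error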

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_out, name, "aten::linalg_lu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_out, schema_str, "linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)")

// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_out::schema> create_linalg_lu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_out::name, linalg_lu_out::overload_name)
      .typed<linalg_lu_out::schema>();
}

// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out::call(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {

    static auto op = create_linalg_lu_out_typed_handle();
    return op.call(A, pivot, P, L, U);
}

// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {

    static auto op = create_linalg_lu_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot, P, L, U);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_ex, name, "aten::linalg_ldl_factor_ex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_ex, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_ex, schema_str, "linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)")

// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_factor_ex::schema> create_linalg_ldl_factor_ex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_factor_ex::name, linalg_ldl_factor_ex::overload_name)
      .typed<linalg_ldl_factor_ex::schema>();
}

// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex::call(const at::Tensor & self, bool hermitian, bool check_errors) {

    static auto op = create_linalg_ldl_factor_ex_typed_handle();
    return op.call(self, hermitian, check_errors);
}

// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors) {

    static auto op = create_linalg_ldl_factor_ex_typed_handle();
    return op.redispatch(dispatchKeySet, self, hermitian, check_errors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_ex_out, name, "aten::linalg_ldl_factor_ex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_ex_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_ex_out, schema_str, "linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)")

// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_factor_ex_out::schema> create_linalg_ldl_factor_ex_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_factor_ex_out::name, linalg_ldl_factor_ex_out::overload_name)
      .typed<linalg_ldl_factor_ex_out::schema>();
}

// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out::call(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {

    static auto op = create_linalg_ldl_factor_ex_out_typed_handle();
    return op.call(self, hermitian, check_errors, LD, pivots, info);
}

// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {

    static auto op = create_linalg_ldl_factor_ex_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, hermitian, check_errors, LD, pivots, info);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_solve, name, "aten::linalg_ldl_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_solve, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_solve, schema_str, "linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor")

// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_solve::schema> create_linalg_ldl_solve_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_solve::name, linalg_ldl_solve::overload_name)
      .typed<linalg_ldl_solve::schema>();
}

// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
at::Tensor linalg_ldl_solve::call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {

    static auto op = create_linalg_ldl_solve_typed_handle();
    return op.call(LD, pivots, B, hermitian);
}

// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
at::Tensor linalg_ldl_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {

    static auto op = create_linalg_ldl_solve_typed_handle();
    return op.redispatch(dispatchKeySet, LD, pivots, B, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_solve_out, name, "aten::linalg_ldl_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_solve_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_solve_out, schema_str, "linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_solve_out::schema> create_linalg_ldl_solve_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_solve_out::name, linalg_ldl_solve_out::overload_name)
      .typed<linalg_ldl_solve_out::schema>();
}

// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_ldl_solve_out::call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_ldl_solve_out_typed_handle();
    return op.call(LD, pivots, B, hermitian, out);
}

// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_ldl_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_ldl_solve_out_typed_handle();
    return op.redispatch(dispatchKeySet, LD, pivots, B, hermitian, out);
}
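
// The LDL ops above compose into a factor-then-solve pipeline. Sketch
// (illustrative; assumes a well-conditioned symmetric A):
//
//   at::Tensor A = at::randn({3, 3});
//   A = A + A.t();                                       // symmetrize
//   at::Tensor B = at::randn({3, 2});
//   auto [LD, pivots, info] = at::linalg_ldl_factor_ex(A);
//   at::Tensor X = at::linalg_ldl_solve(LD, pivots, B);  // A.matmul(X) ~= B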

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lstsq, name, "aten::linalg_lstsq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lstsq, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lstsq, schema_str, "linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)")

// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lstsq::schema> create_linalg_lstsq_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lstsq::name, linalg_lstsq::overload_name)
      .typed<linalg_lstsq::schema>();
}

// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq::call(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {

    static auto op = create_linalg_lstsq_typed_handle();
    return op.call(self, b, rcond, driver);
}

// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {

    static auto op = create_linalg_lstsq_typed_handle();
    return op.redispatch(dispatchKeySet, self, b, rcond, driver);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lstsq_out, name, "aten::linalg_lstsq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lstsq_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lstsq_out, schema_str, "linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)")

// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lstsq_out::schema> create_linalg_lstsq_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lstsq_out::name, linalg_lstsq_out::overload_name)
      .typed<linalg_lstsq_out::schema>();
}

// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out::call(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {

    static auto op = create_linalg_lstsq_out_typed_handle();
    return op.call(self, b, rcond, driver, solution, residuals, rank, singular_values);
}

// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {

    static auto op = create_linalg_lstsq_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, b, rcond, driver, solution, residuals, rank, singular_values);
}
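
// Sketch for linalg_lstsq (illustrative): solves min ||self @ X - b|| in the
// least-squares sense; residuals/rank/singular_values may come back empty
// depending on the driver and inputs, per the four-output schema.
//
//   at::Tensor M = at::randn({5, 3});
//   at::Tensor y = at::randn({5, 1});
//   auto [sol, res, rank, sv] = at::linalg_lstsq(M, y);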

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vecdot, name, "aten::linalg_vecdot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vecdot, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vecdot, schema_str, "linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor")

// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vecdot::schema> create_linalg_vecdot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vecdot::name, linalg_vecdot::overload_name)
      .typed<linalg_vecdot::schema>();
}

// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
at::Tensor linalg_vecdot::call(const at::Tensor & x, const at::Tensor & y, int64_t dim) {

    static auto op = create_linalg_vecdot_typed_handle();
    return op.call(x, y, dim);
}

// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
at::Tensor linalg_vecdot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim) {

    static auto op = create_linalg_vecdot_typed_handle();
    return op.redispatch(dispatchKeySet, x, y, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vecdot_out, name, "aten::linalg_vecdot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vecdot_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vecdot_out, schema_str, "linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vecdot_out::schema> create_linalg_vecdot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vecdot_out::name, linalg_vecdot_out::overload_name)
      .typed<linalg_vecdot_out::schema>();
}

// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_vecdot_out::call(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {

    static auto op = create_linalg_vecdot_out_typed_handle();
    return op.call(x, y, dim, out);
}

// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_vecdot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {

    static auto op = create_linalg_vecdot_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, y, dim, out);
}
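
// linalg_vecdot is a batched dot product over `dim`, conjugating `x` for
// complex inputs. Sketch (illustrative):
//
//   at::Tensor x = at::randn({4, 3});
//   at::Tensor y = at::randn({4, 3});
//   at::Tensor d = at::linalg_vecdot(x, y, /*dim=*/-1);  // shape {4}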

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_exp, name, "aten::linalg_matrix_exp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_exp, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_exp, schema_str, "linalg_matrix_exp(Tensor self) -> Tensor")

// aten::linalg_matrix_exp(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_exp::schema> create_linalg_matrix_exp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_exp::name, linalg_matrix_exp::overload_name)
      .typed<linalg_matrix_exp::schema>();
}

// aten::linalg_matrix_exp(Tensor self) -> Tensor
at::Tensor linalg_matrix_exp::call(const at::Tensor & self) {

    static auto op = create_linalg_matrix_exp_typed_handle();
    return op.call(self);
}

// aten::linalg_matrix_exp(Tensor self) -> Tensor
at::Tensor linalg_matrix_exp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_linalg_matrix_exp_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_eigh, name, "aten::_linalg_eigh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_eigh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_eigh, schema_str, "_linalg_eigh(Tensor A, str UPLO=\"L\", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)")

// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_eigh::schema> create__linalg_eigh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_eigh::name, _linalg_eigh::overload_name)
      .typed<_linalg_eigh::schema>();
}

// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh::call(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {

    static auto op = create__linalg_eigh_typed_handle();
    return op.call(A, UPLO, compute_v);
}

// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO, bool compute_v) {

    static auto op = create__linalg_eigh_typed_handle();
    return op.redispatch(dispatchKeySet, A, UPLO, compute_v);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_eigh_eigenvalues, name, "aten::_linalg_eigh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_eigh_eigenvalues, overload_name, "eigenvalues")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_eigh_eigenvalues, schema_str, "_linalg_eigh.eigenvalues(Tensor A, str UPLO=\"L\", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)")

// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_eigh_eigenvalues::schema> create__linalg_eigh_eigenvalues_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_eigh_eigenvalues::name, _linalg_eigh_eigenvalues::overload_name)
      .typed<_linalg_eigh_eigenvalues::schema>();
}

// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_eigenvalues::call(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {

    static auto op = create__linalg_eigh_eigenvalues_typed_handle();
    return op.call(A, UPLO, compute_v, eigenvalues, eigenvectors);
}

// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_eigenvalues::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {

    static auto op = create__linalg_eigh_eigenvalues_typed_handle();
    return op.redispatch(dispatchKeySet, A, UPLO, compute_v, eigenvalues, eigenvectors);
}
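
// _linalg_eigh is an internal helper that eigh-style public ops lower to;
// `compute_v` selects whether eigenvectors are materialized. Sketch
// (illustrative; internal ops carry no API-stability guarantee):
//
//   at::Tensor A = at::randn({3, 3});
//   A = A + A.t();   // UPLO ("L"/"U") assumes a symmetric/Hermitian input
//   auto [w, V] = at::_linalg_eigh(A, "L", /*compute_v=*/true);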

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm, name, "aten::linalg_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm, schema_str, "linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")

// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_norm::schema> create_linalg_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_norm::name, linalg_norm::overload_name)
      .typed<linalg_norm::schema>();
}

// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_norm::call(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_linalg_norm_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_linalg_norm_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_ord_str, name, "aten::linalg_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_ord_str, overload_name, "ord_str")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_ord_str, schema_str, "linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")

// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_norm_ord_str::schema> create_linalg_norm_ord_str_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_norm_ord_str::name, linalg_norm_ord_str::overload_name)
      .typed<linalg_norm_ord_str::schema>();
}

// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_norm_ord_str::call(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_linalg_norm_ord_str_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_norm_ord_str::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_linalg_norm_ord_str_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_out, name, "aten::linalg_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_out, schema_str, "linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_norm_out::schema> create_linalg_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_norm_out::name, linalg_norm_out::overload_name)
      .typed<linalg_norm_out::schema>();
}

// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_norm_out::call(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_linalg_norm_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_linalg_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_ord_str_out, name, "aten::linalg_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_ord_str_out, overload_name, "ord_str_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_norm_ord_str_out, schema_str, "linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_norm_ord_str_out::schema> create_linalg_norm_ord_str_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_norm_ord_str_out::name, linalg_norm_ord_str_out::overload_name)
      .typed<linalg_norm_ord_str_out::schema>();
}

// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_norm_ord_str_out::call(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_linalg_norm_ord_str_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_norm_ord_str_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_linalg_norm_ord_str_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}
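
// The four linalg_norm entries above are two overload pairs: Scalar? `ord`
// and string `ord` ("fro", "nuc"), each with an out variant. With everything
// defaulted, ord=None yields the 2-norm of the flattened input. Sketch
// (illustrative):
//
//   at::Tensor v = at::randn({5});
//   at::Tensor l2 = at::linalg_norm(v);           // Scalar? overload
//   at::Tensor m = at::randn({3, 3});
//   at::Tensor fro = at::linalg_norm(m, "fro");   // ord_str overload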

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_svdvals, name, "aten::linalg_svdvals")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_svdvals, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_svdvals, schema_str, "linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor")

// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_svdvals::schema> create_linalg_svdvals_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_svdvals::name, linalg_svdvals::overload_name)
      .typed<linalg_svdvals::schema>();
}

// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
at::Tensor linalg_svdvals::call(const at::Tensor & A, c10::optional<c10::string_view> driver) {

    static auto op = create_linalg_svdvals_typed_handle();
    return op.call(A, driver);
}

// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
at::Tensor linalg_svdvals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional<c10::string_view> driver) {

    static auto op = create_linalg_svdvals_typed_handle();
    return op.redispatch(dispatchKeySet, A, driver);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_svdvals_out, name, "aten::linalg_svdvals")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_svdvals_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_svdvals_out, schema_str, "linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_svdvals_out::schema> create_linalg_svdvals_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_svdvals_out::name, linalg_svdvals_out::overload_name)
      .typed<linalg_svdvals_out::schema>();
}

// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_svdvals_out::call(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {

    static auto op = create_linalg_svdvals_out_typed_handle();
    return op.call(A, driver, out);
}

// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_svdvals_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {

    static auto op = create_linalg_svdvals_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, driver, out);
}
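
// Sketch for linalg_svdvals (illustrative): singular values only, returned in
// descending order; the optional `driver` selects the backend routine where
// supported.
//
//   at::Tensor A = at::randn({4, 3});
//   at::Tensor S = at::linalg_svdvals(A);   // shape {3}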

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_power, name, "aten::linalg_matrix_power")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_power, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_power, schema_str, "linalg_matrix_power(Tensor self, int n) -> Tensor")

// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_power::schema> create_linalg_matrix_power_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_power::name, linalg_matrix_power::overload_name)
      .typed<linalg_matrix_power::schema>();
}

// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
at::Tensor linalg_matrix_power::call(const at::Tensor & self, int64_t n) {

    static auto op = create_linalg_matrix_power_typed_handle();
    return op.call(self, n);
}

// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
at::Tensor linalg_matrix_power::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n) {

    static auto op = create_linalg_matrix_power_typed_handle();
    return op.redispatch(dispatchKeySet, self, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_power_out, name, "aten::linalg_matrix_power")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_power_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_power_out, schema_str, "linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_power_out::schema> create_linalg_matrix_power_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_power_out::name, linalg_matrix_power_out::overload_name)
      .typed<linalg_matrix_power_out::schema>();
}

// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_power_out::call(const at::Tensor & self, int64_t n, at::Tensor & out) {

    static auto op = create_linalg_matrix_power_out_typed_handle();
    return op.call(self, n, out);
}

// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_power_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {

    static auto op = create_linalg_matrix_power_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, out);
}
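
// Sketch for linalg_matrix_power (illustrative): integer matrix powers,
// including n=0 (identity) and negative n (powers of the inverse, which
// require a square, invertible input).
//
//   at::Tensor m = at::randn({3, 3});
//   at::Tensor m3 = at::linalg_matrix_power(m, 3);   // m @ m @ m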

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_serialization_subcmul, name, "aten::_test_serialization_subcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_serialization_subcmul, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_serialization_subcmul, schema_str, "_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor")

// aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_serialization_subcmul::schema> create__test_serialization_subcmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_serialization_subcmul::name, _test_serialization_subcmul::overload_name)
      .typed<_test_serialization_subcmul::schema>();
}

// aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
at::Tensor _test_serialization_subcmul::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create__test_serialization_subcmul_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
at::Tensor _test_serialization_subcmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {

    static auto op = create__test_serialization_subcmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_intlist, name, "aten::_test_optional_intlist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_intlist, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_intlist, schema_str, "_test_optional_intlist(Tensor values, int[]? addends) -> Tensor")

// aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_intlist::schema> create__test_optional_intlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_optional_intlist::name, _test_optional_intlist::overload_name)
      .typed<_test_optional_intlist::schema>();
}

// aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
at::Tensor _test_optional_intlist::call(const at::Tensor & values, at::OptionalIntArrayRef addends) {

    static auto op = create__test_optional_intlist_typed_handle();
    return op.call(values, addends);
}

// aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
at::Tensor _test_optional_intlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends) {

    static auto op = create__test_optional_intlist_typed_handle();
    return op.redispatch(dispatchKeySet, values, addends);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_ambiguous_defaults_a, name, "aten::_test_ambiguous_defaults")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_ambiguous_defaults_a, overload_name, "a")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_ambiguous_defaults_a, schema_str, "_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor")

// aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_ambiguous_defaults_a::schema> create__test_ambiguous_defaults_a_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_ambiguous_defaults_a::name, _test_ambiguous_defaults_a::overload_name)
      .typed<_test_ambiguous_defaults_a::schema>();
}

// aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
at::Tensor _test_ambiguous_defaults_a::call(const at::Tensor & dummy, int64_t a, int64_t b) {

    static auto op = create__test_ambiguous_defaults_a_typed_handle();
    return op.call(dummy, a, b);
}

// aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
at::Tensor _test_ambiguous_defaults_a::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, int64_t b) {

    static auto op = create__test_ambiguous_defaults_a_typed_handle();
    return op.redispatch(dispatchKeySet, dummy, a, b);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_ambiguous_defaults_b, name, "aten::_test_ambiguous_defaults")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_ambiguous_defaults_b, overload_name, "b")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_ambiguous_defaults_b, schema_str, "_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b=\"2\") -> Tensor")

// aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_ambiguous_defaults_b::schema> create__test_ambiguous_defaults_b_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_ambiguous_defaults_b::name, _test_ambiguous_defaults_b::overload_name)
      .typed<_test_ambiguous_defaults_b::schema>();
}

// aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
at::Tensor _test_ambiguous_defaults_b::call(const at::Tensor & dummy, int64_t a, c10::string_view b) {

    static auto op = create__test_ambiguous_defaults_b_typed_handle();
    return op.call(dummy, a, b);
}

// aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
at::Tensor _test_ambiguous_defaults_b::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, c10::string_view b) {

    static auto op = create__test_ambiguous_defaults_b_typed_handle();
    return op.redispatch(dispatchKeySet, dummy, a, b);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_fullcoverage, name, "aten::_test_autograd_multiple_dispatch")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_fullcoverage, overload_name, "fullcoverage")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_fullcoverage, schema_str, "_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor")

// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_fullcoverage::schema> create__test_autograd_multiple_dispatch_fullcoverage_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_fullcoverage::name, _test_autograd_multiple_dispatch_fullcoverage::overload_name)
      .typed<_test_autograd_multiple_dispatch_fullcoverage::schema>();
}

// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_fullcoverage::call(const at::Tensor & self) {

    static auto op = create__test_autograd_multiple_dispatch_fullcoverage_typed_handle();
    return op.call(self);
}

// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_fullcoverage::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__test_autograd_multiple_dispatch_fullcoverage_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_ntonly, name, "aten::_test_autograd_multiple_dispatch")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_ntonly, overload_name, "ntonly")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_ntonly, schema_str, "_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor")

// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_ntonly::schema> create__test_autograd_multiple_dispatch_ntonly_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_ntonly::name, _test_autograd_multiple_dispatch_ntonly::overload_name)
      .typed<_test_autograd_multiple_dispatch_ntonly::schema>();
}

// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_ntonly::call(const at::Tensor & self, bool b) {

    static auto op = create__test_autograd_multiple_dispatch_ntonly_typed_handle();
    return op.call(self, b);
}

// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_ntonly::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool b) {

    static auto op = create__test_autograd_multiple_dispatch_ntonly_typed_handle();
    return op.redispatch(dispatchKeySet, self, b);
}
13374
13375STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(segment_reduce, name, "aten::segment_reduce")
13376STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(segment_reduce, overload_name, "")
13377STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(segment_reduce, schema_str, "segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor")
13378
13379// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
13380static C10_NOINLINE c10::TypedOperatorHandle<segment_reduce::schema> create_segment_reduce_typed_handle() {
13381 return c10::Dispatcher::singleton()
13382 .findSchemaOrThrow(segment_reduce::name, segment_reduce::overload_name)
13383 .typed<segment_reduce::schema>();
13384}
13385
13386// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
13387at::Tensor segment_reduce::call(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
13388
13389 static auto op = create_segment_reduce_typed_handle();
13390 return op.call(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
13391}
13392
13393// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
13394at::Tensor segment_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
13395
13396 static auto op = create_segment_reduce_typed_handle();
13397 return op.redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial);
13398}
13399
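// Illustrative sketch (not generated output): segment_reduce::call above backs
// the public at::segment_reduce entry point. A hedged example, assuming
// <ATen/ATen.h> is included and `lengths` partitions `data` along `axis`:
//
//   at::Tensor data = at::randn({6});
//   at::Tensor lengths = at::tensor({2, 4}, at::kLong);
//   at::Tensor out = at::segment_reduce(data, "sum", lengths);  // two segment sums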
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_segment_reduce_backward, name, "aten::_segment_reduce_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_segment_reduce_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_segment_reduce_backward, schema_str, "_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor")

// aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_segment_reduce_backward::schema> create__segment_reduce_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_segment_reduce_backward::name, _segment_reduce_backward::overload_name)
      .typed<_segment_reduce_backward::schema>();
}

// aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
at::Tensor _segment_reduce_backward::call(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {

    static auto op = create__segment_reduce_backward_typed_handle();
    return op.call(grad, output, data, reduce, lengths, offsets, axis, initial);
}

// aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
at::Tensor _segment_reduce_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {

    static auto op = create__segment_reduce_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual_copy, name, "aten::_make_dual_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual_copy, schema_str, "_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor")

// aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_make_dual_copy::schema> create__make_dual_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_dual_copy::name, _make_dual_copy::overload_name)
      .typed<_make_dual_copy::schema>();
}

// aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
at::Tensor _make_dual_copy::call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {

    static auto op = create__make_dual_copy_typed_handle();
    return op.call(primal, tangent, level);
}

// aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
at::Tensor _make_dual_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {

    static auto op = create__make_dual_copy_typed_handle();
    return op.redispatch(dispatchKeySet, primal, tangent, level);
}

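// Illustrative sketch (not generated output): _make_dual_copy is the
// non-aliasing counterpart of _make_dual, packing a primal with a tangent for
// forward-mode AD. A hedged example, assuming forward-AD level 0 is active and
// `primal`/`tangent` are defined tensors of matching shape:
//
//   at::Tensor dual = at::_make_dual_copy(primal, tangent, /*level=*/0);
//   // the pair can be recovered via at::_unpack_dual(dual, /*level=*/0)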
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex_copy, name, "aten::view_as_complex_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex_copy, schema_str, "view_as_complex_copy(Tensor self) -> Tensor")

// aten::view_as_complex_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<view_as_complex_copy::schema> create_view_as_complex_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_complex_copy::name, view_as_complex_copy::overload_name)
      .typed<view_as_complex_copy::schema>();
}

// aten::view_as_complex_copy(Tensor self) -> Tensor
at::Tensor view_as_complex_copy::call(const at::Tensor & self) {

    static auto op = create_view_as_complex_copy_typed_handle();
    return op.call(self);
}

// aten::view_as_complex_copy(Tensor self) -> Tensor
at::Tensor view_as_complex_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_view_as_complex_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

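// Illustrative sketch (not generated output): unlike at::view_as_complex, the
// _copy variant materializes a new tensor rather than returning a view over the
// input's storage. For a float tensor whose last dimension has size 2:
//
//   at::Tensor x = at::randn({4, 2});
//   at::Tensor z = at::view_as_complex_copy(x);  // complex64, shape {4}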
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view_copy, name, "aten::_neg_view_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view_copy, schema_str, "_neg_view_copy(Tensor self) -> Tensor")

// aten::_neg_view_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_neg_view_copy::schema> create__neg_view_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_neg_view_copy::name, _neg_view_copy::overload_name)
      .typed<_neg_view_copy::schema>();
}

// aten::_neg_view_copy(Tensor self) -> Tensor
at::Tensor _neg_view_copy::call(const at::Tensor & self) {

    static auto op = create__neg_view_copy_typed_handle();
    return op.call(self);
}

// aten::_neg_view_copy(Tensor self) -> Tensor
at::Tensor _neg_view_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__neg_view_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_copy, name, "aten::expand_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_copy, schema_str, "expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor")

// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<expand_copy::schema> create_expand_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expand_copy::name, expand_copy::overload_name)
      .typed<expand_copy::schema>();
}

// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
at::Tensor expand_copy::call(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {

    static auto op = create_expand_copy_typed_handle();
    return op.call(self, size, implicit);
}

// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
at::Tensor expand_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {

    static auto op = create_expand_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, implicit);
}

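// Illustrative sketch (not generated output): the *_copy operators in this
// stretch of the file (_neg_view_copy and expand_copy above, unsqueeze_copy and
// crow_indices_copy below) mirror view operators but return freshly allocated
// results, which functionalization relies on. For example:
//
//   at::Tensor x = at::randn({1, 3});
//   at::Tensor y = at::expand_copy(x, {4, 3});  // owns its storage, unlike x.expand({4, 3})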
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_copy, name, "aten::unsqueeze_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_copy, schema_str, "unsqueeze_copy(Tensor self, int dim) -> Tensor")

// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<unsqueeze_copy::schema> create_unsqueeze_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsqueeze_copy::name, unsqueeze_copy::overload_name)
      .typed<unsqueeze_copy::schema>();
}

// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
at::Tensor unsqueeze_copy::call(const at::Tensor & self, int64_t dim) {

    static auto op = create_unsqueeze_copy_typed_handle();
    return op.call(self, dim);
}

// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
at::Tensor unsqueeze_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create_unsqueeze_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices_copy, name, "aten::crow_indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices_copy, schema_str, "crow_indices_copy(Tensor self) -> Tensor")

// aten::crow_indices_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<crow_indices_copy::schema> create_crow_indices_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(crow_indices_copy::name, crow_indices_copy::overload_name)
      .typed<crow_indices_copy::schema>();
}

// aten::crow_indices_copy(Tensor self) -> Tensor
at::Tensor crow_indices_copy::call(const at::Tensor & self) {

    static auto op = create_crow_indices_copy_typed_handle();
    return op.call(self);
}

// aten::crow_indices_copy(Tensor self) -> Tensor
at::Tensor crow_indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_crow_indices_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_padded_tensor, name, "aten::to_padded_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_padded_tensor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_padded_tensor, schema_str, "to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor")

// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_padded_tensor::schema> create_to_padded_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_padded_tensor::name, to_padded_tensor::overload_name)
      .typed<to_padded_tensor::schema>();
}

// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
at::Tensor to_padded_tensor::call(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {

    static auto op = create_to_padded_tensor_typed_handle();
    return op.call(self, padding, output_size);
}

// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
at::Tensor to_padded_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {

    static auto op = create_to_padded_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, output_size);
}

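// Illustrative sketch (not generated output): to_padded_tensor densifies a
// nested tensor, filling the ragged tail of each component with `padding`. The
// schema appears to be exposed as a Tensor method rather than a free function,
// so a hedged example, assuming `a` and `b` are 1-D tensors of different
// lengths:
//
//   at::Tensor nt = at::_nested_tensor_from_tensor_list({a, b});
//   at::Tensor padded = nt.to_padded_tensor(/*padding=*/0.0);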
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_softmax_with_shape, name, "aten::_nested_tensor_softmax_with_shape")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_softmax_with_shape, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_tensor_softmax_with_shape, schema_str, "_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor")

// aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_softmax_with_shape::schema> create__nested_tensor_softmax_with_shape_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_softmax_with_shape::name, _nested_tensor_softmax_with_shape::overload_name)
      .typed<_nested_tensor_softmax_with_shape::schema>();
}

// aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
at::Tensor _nested_tensor_softmax_with_shape::call(const at::Tensor & self, const at::Tensor & query) {

    static auto op = create__nested_tensor_softmax_with_shape_typed_handle();
    return op.call(self, query);
}

// aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
at::Tensor _nested_tensor_softmax_with_shape::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & query) {

    static auto op = create__nested_tensor_softmax_with_shape_typed_handle();
    return op.redispatch(dispatchKeySet, self, query);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_flash_attention_forward, name, "aten::_flash_attention_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_flash_attention_forward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_flash_attention_forward, schema_str, "_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)")

// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)
static C10_NOINLINE c10::TypedOperatorHandle<_flash_attention_forward::schema> create__flash_attention_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_flash_attention_forward::name, _flash_attention_forward::overload_name)
      .typed<_flash_attention_forward::schema>();
}

// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)
::std::tuple<at::Tensor,at::Tensor,int64_t,int64_t,at::Tensor> _flash_attention_forward::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask) {

    static auto op = create__flash_attention_forward_typed_handle();
    return op.call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask);
}

// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)
::std::tuple<at::Tensor,at::Tensor,int64_t,int64_t,at::Tensor> _flash_attention_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask) {

    static auto op = create__flash_attention_forward_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask);
}

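// Note (illustrative, not generated output): _flash_attention_forward is an
// internal kernel entry point, normally reached through the
// scaled-dot-product-attention stack rather than called directly; the
// philox_seed/philox_offset outputs let the backward pass replay the same
// dropout mask.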
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j0, name, "aten::special_bessel_j0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j0, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j0, schema_str, "special_bessel_j0(Tensor self) -> Tensor")

// aten::special_bessel_j0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_j0::schema> create_special_bessel_j0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_bessel_j0::name, special_bessel_j0::overload_name)
      .typed<special_bessel_j0::schema>();
}

// aten::special_bessel_j0(Tensor self) -> Tensor
at::Tensor special_bessel_j0::call(const at::Tensor & self) {

    static auto op = create_special_bessel_j0_typed_handle();
    return op.call(self);
}

// aten::special_bessel_j0(Tensor self) -> Tensor
at::Tensor special_bessel_j0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_bessel_j0_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j0_out, name, "aten::special_bessel_j0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j0_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j0_out, schema_str, "special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_j0_out::schema> create_special_bessel_j0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_bessel_j0_out::name, special_bessel_j0_out::overload_name)
      .typed<special_bessel_j0_out::schema>();
}

// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_j0_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_bessel_j0_out_typed_handle();
    return op.call(self, out);
}

// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_j0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_bessel_j0_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

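// Illustrative sketch (not generated output): in the generated C++ API the out=
// overloads take the output tensor as the first argument. Assuming `x` is a
// floating-point tensor:
//
//   at::Tensor out = at::empty_like(x);
//   at::special_bessel_j0_out(out, x);  // writes J0(x) into `out` and returns it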
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_y0, name, "aten::special_bessel_y0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_y0, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_y0, schema_str, "special_bessel_y0(Tensor self) -> Tensor")

// aten::special_bessel_y0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_y0::schema> create_special_bessel_y0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_bessel_y0::name, special_bessel_y0::overload_name)
      .typed<special_bessel_y0::schema>();
}

// aten::special_bessel_y0(Tensor self) -> Tensor
at::Tensor special_bessel_y0::call(const at::Tensor & self) {

    static auto op = create_special_bessel_y0_typed_handle();
    return op.call(self);
}

// aten::special_bessel_y0(Tensor self) -> Tensor
at::Tensor special_bessel_y0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_bessel_y0_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_y0_out, name, "aten::special_bessel_y0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_y0_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_y0_out, schema_str, "special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_y0_out::schema> create_special_bessel_y0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_bessel_y0_out::name, special_bessel_y0_out::overload_name)
      .typed<special_bessel_y0_out::schema>();
}

// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_y0_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_bessel_y0_out_typed_handle();
    return op.call(self, out);
}

// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_y0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_bessel_y0_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u, name, "aten::special_chebyshev_polynomial_u")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u, schema_str, "special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor")

// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u::schema> create_special_chebyshev_polynomial_u_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_u::name, special_chebyshev_polynomial_u::overload_name)
      .typed<special_chebyshev_polynomial_u::schema>();
}

// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_u::call(const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_u_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_u::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_u_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_x_scalar, name, "aten::special_chebyshev_polynomial_u")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_x_scalar, overload_name, "x_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_x_scalar, schema_str, "special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor")

// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_x_scalar::schema> create_special_chebyshev_polynomial_u_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_u_x_scalar::name, special_chebyshev_polynomial_u_x_scalar::overload_name)
      .typed<special_chebyshev_polynomial_u_x_scalar::schema>();
}

// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_u_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_u_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_u_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_u_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_n_scalar, name, "aten::special_chebyshev_polynomial_u")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_n_scalar, overload_name, "n_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_n_scalar, schema_str, "special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor")

// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_n_scalar::schema> create_special_chebyshev_polynomial_u_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_u_n_scalar::name, special_chebyshev_polynomial_u_n_scalar::overload_name)
      .typed<special_chebyshev_polynomial_u_n_scalar::schema>();
}

// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_u_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_chebyshev_polynomial_u_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_u_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_chebyshev_polynomial_u_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_out, name, "aten::special_chebyshev_polynomial_u")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_out, schema_str, "special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_out::schema> create_special_chebyshev_polynomial_u_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_u_out::name, special_chebyshev_polynomial_u_out::overload_name)
      .typed<special_chebyshev_polynomial_u_out::schema>();
}

// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_u_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_u_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_x_scalar_out, name, "aten::special_chebyshev_polynomial_u")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_x_scalar_out, overload_name, "x_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_x_scalar_out, schema_str, "special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_x_scalar_out::schema> create_special_chebyshev_polynomial_u_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_u_x_scalar_out::name, special_chebyshev_polynomial_u_x_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_u_x_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_u_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_u_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_n_scalar_out, name, "aten::special_chebyshev_polynomial_u")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_n_scalar_out, overload_name, "n_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_u_n_scalar_out, schema_str, "special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_n_scalar_out::schema> create_special_chebyshev_polynomial_u_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_u_n_scalar_out::name, special_chebyshev_polynomial_u_n_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_u_n_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_u_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_u_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

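// Illustrative sketch (not generated output): the x_scalar/n_scalar overloads
// above accept a Scalar for either argument, and C++ overload resolution picks
// the matching schema. Evaluating U_n(0.5) for several degrees, for example:
//
//   at::Tensor n = at::arange(5, at::kFloat);
//   at::Tensor u = at::special_chebyshev_polynomial_u(at::Scalar(0.5), n);  // x_scalar overload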
13900STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he, name, "aten::special_hermite_polynomial_he")
13901STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he, overload_name, "")
13902STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he, schema_str, "special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor")
13903
13904// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
13905static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he::schema> create_special_hermite_polynomial_he_typed_handle() {
13906 return c10::Dispatcher::singleton()
13907 .findSchemaOrThrow(special_hermite_polynomial_he::name, special_hermite_polynomial_he::overload_name)
13908 .typed<special_hermite_polynomial_he::schema>();
13909}
13910
13911// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
13912at::Tensor special_hermite_polynomial_he::call(const at::Tensor & x, const at::Tensor & n) {
13913
13914 static auto op = create_special_hermite_polynomial_he_typed_handle();
13915 return op.call(x, n);
13916}
13917
13918// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
13919at::Tensor special_hermite_polynomial_he::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
13920
13921 static auto op = create_special_hermite_polynomial_he_typed_handle();
13922 return op.redispatch(dispatchKeySet, x, n);
13923}
13924
13925STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_x_scalar, name, "aten::special_hermite_polynomial_he")
13926STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_x_scalar, overload_name, "x_scalar")
13927STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_x_scalar, schema_str, "special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor")
13928
13929// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
13930static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_x_scalar::schema> create_special_hermite_polynomial_he_x_scalar_typed_handle() {
13931 return c10::Dispatcher::singleton()
13932 .findSchemaOrThrow(special_hermite_polynomial_he_x_scalar::name, special_hermite_polynomial_he_x_scalar::overload_name)
13933 .typed<special_hermite_polynomial_he_x_scalar::schema>();
13934}
13935
13936// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
13937at::Tensor special_hermite_polynomial_he_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
13938
13939 static auto op = create_special_hermite_polynomial_he_x_scalar_typed_handle();
13940 return op.call(x, n);
13941}
13942
13943// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
13944at::Tensor special_hermite_polynomial_he_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
13945
13946 static auto op = create_special_hermite_polynomial_he_x_scalar_typed_handle();
13947 return op.redispatch(dispatchKeySet, x, n);
13948}
13949
13950STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_n_scalar, name, "aten::special_hermite_polynomial_he")
13951STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_n_scalar, overload_name, "n_scalar")
13952STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_n_scalar, schema_str, "special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor")
13953
13954// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
13955static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_n_scalar::schema> create_special_hermite_polynomial_he_n_scalar_typed_handle() {
13956 return c10::Dispatcher::singleton()
13957 .findSchemaOrThrow(special_hermite_polynomial_he_n_scalar::name, special_hermite_polynomial_he_n_scalar::overload_name)
13958 .typed<special_hermite_polynomial_he_n_scalar::schema>();
13959}
13960
13961// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
13962at::Tensor special_hermite_polynomial_he_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
13963
13964 static auto op = create_special_hermite_polynomial_he_n_scalar_typed_handle();
13965 return op.call(x, n);
13966}
13967
13968// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
13969at::Tensor special_hermite_polynomial_he_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
13970
13971 static auto op = create_special_hermite_polynomial_he_n_scalar_typed_handle();
13972 return op.redispatch(dispatchKeySet, x, n);
13973}
13974
13975STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_out, name, "aten::special_hermite_polynomial_he")
13976STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_out, overload_name, "out")
13977STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_out, schema_str, "special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
13978
13979// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
13980static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_out::schema> create_special_hermite_polynomial_he_out_typed_handle() {
13981 return c10::Dispatcher::singleton()
13982 .findSchemaOrThrow(special_hermite_polynomial_he_out::name, special_hermite_polynomial_he_out::overload_name)
13983 .typed<special_hermite_polynomial_he_out::schema>();
13984}
13985
13986// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
13987at::Tensor & special_hermite_polynomial_he_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
13988
13989 static auto op = create_special_hermite_polynomial_he_out_typed_handle();
13990 return op.call(x, n, out);
13991}
13992
13993// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
13994at::Tensor & special_hermite_polynomial_he_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
13995
13996 static auto op = create_special_hermite_polynomial_he_out_typed_handle();
13997 return op.redispatch(dispatchKeySet, x, n, out);
13998}
13999
14000STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_x_scalar_out, name, "aten::special_hermite_polynomial_he")
14001STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_x_scalar_out, overload_name, "x_scalar_out")
14002STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_x_scalar_out, schema_str, "special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
14003
14004// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
14005static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_x_scalar_out::schema> create_special_hermite_polynomial_he_x_scalar_out_typed_handle() {
14006 return c10::Dispatcher::singleton()
14007 .findSchemaOrThrow(special_hermite_polynomial_he_x_scalar_out::name, special_hermite_polynomial_he_x_scalar_out::overload_name)
14008 .typed<special_hermite_polynomial_he_x_scalar_out::schema>();
14009}
14010
14011// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
14012at::Tensor & special_hermite_polynomial_he_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
14013
14014 static auto op = create_special_hermite_polynomial_he_x_scalar_out_typed_handle();
14015 return op.call(x, n, out);
14016}
14017
14018// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
14019at::Tensor & special_hermite_polynomial_he_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
14020
14021 static auto op = create_special_hermite_polynomial_he_x_scalar_out_typed_handle();
14022 return op.redispatch(dispatchKeySet, x, n, out);
14023}
14024
14025STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_n_scalar_out, name, "aten::special_hermite_polynomial_he")
14026STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_n_scalar_out, overload_name, "n_scalar_out")
14027STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_he_n_scalar_out, schema_str, "special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")
14028
14029// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
14030static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_n_scalar_out::schema> create_special_hermite_polynomial_he_n_scalar_out_typed_handle() {
14031 return c10::Dispatcher::singleton()
14032 .findSchemaOrThrow(special_hermite_polynomial_he_n_scalar_out::name, special_hermite_polynomial_he_n_scalar_out::overload_name)
14033 .typed<special_hermite_polynomial_he_n_scalar_out::schema>();
14034}
14035
14036// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
14037at::Tensor & special_hermite_polynomial_he_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
14038
14039 static auto op = create_special_hermite_polynomial_he_n_scalar_out_typed_handle();
14040 return op.call(x, n, out);
14041}
14042
14043// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
14044at::Tensor & special_hermite_polynomial_he_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
14045
14046 static auto op = create_special_hermite_polynomial_he_n_scalar_out_typed_handle();
14047 return op.redispatch(dispatchKeySet, x, n, out);
14048}
14049
14050STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i1, name, "aten::special_modified_bessel_i1")
14051STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i1, overload_name, "")
14052STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i1, schema_str, "special_modified_bessel_i1(Tensor self) -> Tensor")
14053
14054// aten::special_modified_bessel_i1(Tensor self) -> Tensor
14055static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_i1::schema> create_special_modified_bessel_i1_typed_handle() {
14056 return c10::Dispatcher::singleton()
14057 .findSchemaOrThrow(special_modified_bessel_i1::name, special_modified_bessel_i1::overload_name)
14058 .typed<special_modified_bessel_i1::schema>();
14059}
14060
14061// aten::special_modified_bessel_i1(Tensor self) -> Tensor
14062at::Tensor special_modified_bessel_i1::call(const at::Tensor & self) {
14063
14064 static auto op = create_special_modified_bessel_i1_typed_handle();
14065 return op.call(self);
14066}
14067
14068// aten::special_modified_bessel_i1(Tensor self) -> Tensor
14069at::Tensor special_modified_bessel_i1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
14070
14071 static auto op = create_special_modified_bessel_i1_typed_handle();
14072 return op.redispatch(dispatchKeySet, self);
14073}
14074
14075STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i1_out, name, "aten::special_modified_bessel_i1")
14076STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i1_out, overload_name, "out")
14077STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i1_out, schema_str, "special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
14078
14079// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
14080static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_i1_out::schema> create_special_modified_bessel_i1_out_typed_handle() {
14081 return c10::Dispatcher::singleton()
14082 .findSchemaOrThrow(special_modified_bessel_i1_out::name, special_modified_bessel_i1_out::overload_name)
14083 .typed<special_modified_bessel_i1_out::schema>();
14084}
14085
14086// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
14087at::Tensor & special_modified_bessel_i1_out::call(const at::Tensor & self, at::Tensor & out) {
14088
14089 static auto op = create_special_modified_bessel_i1_out_typed_handle();
14090 return op.call(self, out);
14091}
14092
14093// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
14094at::Tensor & special_modified_bessel_i1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
14095
14096 static auto op = create_special_modified_bessel_i1_out_typed_handle();
14097 return op.redispatch(dispatchKeySet, self, out);
14098}
14099
14100STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v, name, "aten::special_shifted_chebyshev_polynomial_v")
14101STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v, overload_name, "")
14102STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v, schema_str, "special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor")
14103
14104// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
14105static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v::schema> create_special_shifted_chebyshev_polynomial_v_typed_handle() {
14106 return c10::Dispatcher::singleton()
14107 .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v::name, special_shifted_chebyshev_polynomial_v::overload_name)
14108 .typed<special_shifted_chebyshev_polynomial_v::schema>();
14109}
14110
14111// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
14112at::Tensor special_shifted_chebyshev_polynomial_v::call(const at::Tensor & x, const at::Tensor & n) {
14113
14114 static auto op = create_special_shifted_chebyshev_polynomial_v_typed_handle();
14115 return op.call(x, n);
14116}
14117
14118// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
14119at::Tensor special_shifted_chebyshev_polynomial_v::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
14120
14121 static auto op = create_special_shifted_chebyshev_polynomial_v_typed_handle();
14122 return op.redispatch(dispatchKeySet, x, n);
14123}
14124
14125STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_x_scalar, name, "aten::special_shifted_chebyshev_polynomial_v")
14126STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_x_scalar, overload_name, "x_scalar")
14127STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_x_scalar, schema_str, "special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor")
14128
14129// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
14130static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_x_scalar::schema> create_special_shifted_chebyshev_polynomial_v_x_scalar_typed_handle() {
14131 return c10::Dispatcher::singleton()
14132 .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_x_scalar::name, special_shifted_chebyshev_polynomial_v_x_scalar::overload_name)
14133 .typed<special_shifted_chebyshev_polynomial_v_x_scalar::schema>();
14134}
14135
14136// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
14137at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
14138
14139 static auto op = create_special_shifted_chebyshev_polynomial_v_x_scalar_typed_handle();
14140 return op.call(x, n);
14141}
14142
14143// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
14144at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
14145
14146 static auto op = create_special_shifted_chebyshev_polynomial_v_x_scalar_typed_handle();
14147 return op.redispatch(dispatchKeySet, x, n);
14148}
14149
14150STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_n_scalar, name, "aten::special_shifted_chebyshev_polynomial_v")
14151STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_n_scalar, overload_name, "n_scalar")
14152STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_n_scalar, schema_str, "special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor")
14153
14154// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
14155static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_n_scalar::schema> create_special_shifted_chebyshev_polynomial_v_n_scalar_typed_handle() {
14156 return c10::Dispatcher::singleton()
14157 .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_n_scalar::name, special_shifted_chebyshev_polynomial_v_n_scalar::overload_name)
14158 .typed<special_shifted_chebyshev_polynomial_v_n_scalar::schema>();
14159}
14160
14161// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
14162at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
14163
14164 static auto op = create_special_shifted_chebyshev_polynomial_v_n_scalar_typed_handle();
14165 return op.call(x, n);
14166}
14167
14168// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
14169at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
14170
14171 static auto op = create_special_shifted_chebyshev_polynomial_v_n_scalar_typed_handle();
14172 return op.redispatch(dispatchKeySet, x, n);
14173}
14174
14175STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_out, name, "aten::special_shifted_chebyshev_polynomial_v")
14176STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_out, overload_name, "out")
14177STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_out, schema_str, "special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
14178
14179// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
14180static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_out::schema> create_special_shifted_chebyshev_polynomial_v_out_typed_handle() {
14181 return c10::Dispatcher::singleton()
14182 .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_out::name, special_shifted_chebyshev_polynomial_v_out::overload_name)
14183 .typed<special_shifted_chebyshev_polynomial_v_out::schema>();
14184}
14185
14186// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
14187at::Tensor & special_shifted_chebyshev_polynomial_v_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
14188
14189 static auto op = create_special_shifted_chebyshev_polynomial_v_out_typed_handle();
14190 return op.call(x, n, out);
14191}
14192
14193// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_v_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_x_scalar_out, name, "aten::special_shifted_chebyshev_polynomial_v")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_x_scalar_out, overload_name, "x_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_x_scalar_out, schema_str, "special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_x_scalar_out::schema> create_special_shifted_chebyshev_polynomial_v_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_x_scalar_out::name, special_shifted_chebyshev_polynomial_v_x_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_v_x_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_v_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_v_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_n_scalar_out, name, "aten::special_shifted_chebyshev_polynomial_v")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_n_scalar_out, overload_name, "n_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_v_n_scalar_out, schema_str, "special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_n_scalar_out::schema> create_special_shifted_chebyshev_polynomial_v_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_n_scalar_out::name, special_shifted_chebyshev_polynomial_v_n_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_v_n_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_v_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_v_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}
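
// Usage sketch (illustrative, not generated): the wrappers above are what a
// direct dispatcher lookup would otherwise spell out by hand. Assuming the
// schema is registered, the following is roughly equivalent to calling
// special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x, n, out):
//
//   auto handle = c10::Dispatcher::singleton()
//       .findSchemaOrThrow("aten::special_shifted_chebyshev_polynomial_v", "n_scalar_out")
//       .typed<at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &)>();
//   handle.call(x, n, out);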

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w, name, "aten::special_shifted_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w, schema_str, "special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor")

// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w::schema> create_special_shifted_chebyshev_polynomial_w_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w::name, special_shifted_chebyshev_polynomial_w::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w::call(const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}
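
// Usage sketch (illustrative, not generated): from client code the functional
// variant is normally reached through the public ATen wrapper of the same
// name; tensor names below are hypothetical:
//
//   at::Tensor x = at::rand({8});
//   at::Tensor n = at::full({8}, 3);
//   at::Tensor w = at::special_shifted_chebyshev_polynomial_w(x, n);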

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_x_scalar, name, "aten::special_shifted_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_x_scalar, overload_name, "x_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_x_scalar, schema_str, "special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor")

// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_x_scalar::schema> create_special_shifted_chebyshev_polynomial_w_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_x_scalar::name, special_shifted_chebyshev_polynomial_w_x_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_x_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_n_scalar, name, "aten::special_shifted_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_n_scalar, overload_name, "n_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_n_scalar, schema_str, "special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor")

// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_n_scalar::schema> create_special_shifted_chebyshev_polynomial_w_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_n_scalar::name, special_shifted_chebyshev_polynomial_w_n_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_n_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}
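
// Usage sketch (illustrative, not generated): the x_scalar / n_scalar
// overloads take a Scalar in place of one tensor argument; overload
// resolution picks the right schema, e.g.:
//
//   at::Tensor x = at::rand({8});
//   at::Tensor w = at::special_shifted_chebyshev_polynomial_w(x, /*n=*/at::Scalar(3));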

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_out, name, "aten::special_shifted_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_out, schema_str, "special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_out::schema> create_special_shifted_chebyshev_polynomial_w_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_out::name, special_shifted_chebyshev_polynomial_w_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_x_scalar_out, name, "aten::special_shifted_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_x_scalar_out, overload_name, "x_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_x_scalar_out, schema_str, "special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_x_scalar_out::schema> create_special_shifted_chebyshev_polynomial_w_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_x_scalar_out::name, special_shifted_chebyshev_polynomial_w_x_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_x_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_n_scalar_out, name, "aten::special_shifted_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_n_scalar_out, overload_name, "n_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_w_n_scalar_out, schema_str, "special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_n_scalar_out::schema> create_special_shifted_chebyshev_polynomial_w_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_n_scalar_out::name, special_shifted_chebyshev_polynomial_w_n_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_n_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_w_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}
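
// Usage sketch (illustrative, not generated): the *_out overloads write into
// a caller-provided tensor instead of allocating; in the public C++ API the
// out argument conventionally comes first:
//
//   at::Tensor out = at::empty_like(x);
//   at::special_shifted_chebyshev_polynomial_w_out(out, x, n);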

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_spherical_bessel_j0, name, "aten::special_spherical_bessel_j0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_spherical_bessel_j0, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_spherical_bessel_j0, schema_str, "special_spherical_bessel_j0(Tensor x) -> Tensor")

// aten::special_spherical_bessel_j0(Tensor x) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_spherical_bessel_j0::schema> create_special_spherical_bessel_j0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_spherical_bessel_j0::name, special_spherical_bessel_j0::overload_name)
      .typed<special_spherical_bessel_j0::schema>();
}

// aten::special_spherical_bessel_j0(Tensor x) -> Tensor
at::Tensor special_spherical_bessel_j0::call(const at::Tensor & x) {

    static auto op = create_special_spherical_bessel_j0_typed_handle();
    return op.call(x);
}

// aten::special_spherical_bessel_j0(Tensor x) -> Tensor
at::Tensor special_spherical_bessel_j0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {

    static auto op = create_special_spherical_bessel_j0_typed_handle();
    return op.redispatch(dispatchKeySet, x);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_spherical_bessel_j0_out, name, "aten::special_spherical_bessel_j0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_spherical_bessel_j0_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_spherical_bessel_j0_out, schema_str, "special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_spherical_bessel_j0_out::schema> create_special_spherical_bessel_j0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_spherical_bessel_j0_out::name, special_spherical_bessel_j0_out::overload_name)
      .typed<special_spherical_bessel_j0_out::schema>();
}

// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_spherical_bessel_j0_out::call(const at::Tensor & x, at::Tensor & out) {

    static auto op = create_special_spherical_bessel_j0_out_typed_handle();
    return op.call(x, out);
}

// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_spherical_bessel_j0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {

    static auto op = create_special_spherical_bessel_j0_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, out);
}
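
// Usage sketch (illustrative, not generated): functional vs. out variant of
// the same op, assuming the public at:: wrappers are exposed in this build:
//
//   at::Tensor x = at::linspace(0.1, 10.0, 64);
//   at::Tensor y = at::special_spherical_bessel_j0(x);   // allocates result
//   at::Tensor out = at::empty_like(x);
//   at::special_spherical_bessel_j0_out(out, x);         // writes into out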

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss_out, name, "aten::_cudnn_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_ctc_loss_out, schema_str, "_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_ctc_loss_out::schema> create__cudnn_ctc_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_ctc_loss_out::name, _cudnn_ctc_loss_out::overload_name)
      .typed<_cudnn_ctc_loss_out::schema>();
}

// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__cudnn_ctc_loss_out_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
}

// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__cudnn_ctc_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_out, name, "aten::_cudnn_rnn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_out, schema_str, "_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))")

// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_out::schema> create__cudnn_rnn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn_out::name, _cudnn_rnn_out::overload_name)
      .typed<_cudnn_rnn_out::schema>();
}

// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {

    static auto op = create__cudnn_rnn_out_typed_handle();
    return op.call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}

// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {

    static auto op = create__cudnn_rnn_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_dropout_out, name, "aten::_fused_dropout")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_dropout_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_dropout_out, schema_str, "_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_fused_dropout_out::schema> create__fused_dropout_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_dropout_out::name, _fused_dropout_out::overload_name)
      .typed<_fused_dropout_out::schema>();
}

// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out::call(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__fused_dropout_out_typed_handle();
    return op.call(self, p, generator, out0, out1);
}

// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__fused_dropout_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator, out0, out1);
}
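
// Usage sketch (illustrative, not generated): the Generator? argument maps to
// c10::optional<at::Generator>, so c10::nullopt selects the default RNG; the
// op writes (output, mask) into out0/out1:
//
//   auto op = create__fused_dropout_out_typed_handle();
//   op.call(self, /*p=*/0.5, /*generator=*/c10::nullopt, out0, out1);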

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_physical_out, name, "aten::_conj_physical")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_physical_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_physical_out, schema_str, "_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_conj_physical_out::schema> create__conj_physical_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj_physical_out::name, _conj_physical_out::overload_name)
      .typed<_conj_physical_out::schema>();
}

// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _conj_physical_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create__conj_physical_out_typed_handle();
    return op.call(self, out);
}

// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _conj_physical_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__conj_physical_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_out, name, "aten::blackman_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_out, schema_str, "blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)")

// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<blackman_window_out::schema> create_blackman_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(blackman_window_out::name, blackman_window_out::overload_name)
      .typed<blackman_window_out::schema>();
}

// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & blackman_window_out::call(int64_t window_length, at::Tensor & out) {

    static auto op = create_blackman_window_out_typed_handle();
    return op.call(window_length, out);
}

// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & blackman_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {

    static auto op = create_blackman_window_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_periodic_out, name, "aten::blackman_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_periodic_out, overload_name, "periodic_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(blackman_window_periodic_out, schema_str, "blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)")

// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<blackman_window_periodic_out::schema> create_blackman_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(blackman_window_periodic_out::name, blackman_window_periodic_out::overload_name)
      .typed<blackman_window_periodic_out::schema>();
}

// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & blackman_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {

    static auto op = create_blackman_window_periodic_out_typed_handle();
    return op.call(window_length, periodic, out);
}

// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & blackman_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {

    static auto op = create_blackman_window_periodic_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, out);
}
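
// Usage sketch (illustrative, not generated): factory ops like the window
// functions take no input tensor, so their out overloads fill a preallocated
// tensor of the right length:
//
//   at::Tensor out = at::empty({128});
//   at::blackman_window_out(out, /*window_length=*/128);
//   at::blackman_window_out(out, /*window_length=*/128, /*periodic=*/true);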

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_out, name, "aten::convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_out, schema_str, "convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)")

// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<convolution_out::schema> create_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(convolution_out::name, convolution_out::overload_name)
      .typed<convolution_out::schema>();
}

// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & convolution_out::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor & out) {

    static auto op = create_convolution_out_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}

// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor & out) {

    static auto op = create_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}
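
// Usage sketch (illustrative, not generated): a plain 2-D convolution through
// the generic functional entry point; stride/padding/dilation are given per
// spatial dimension, and the shapes below are hypothetical:
//
//   at::Tensor input  = at::rand({1, 3, 32, 32});
//   at::Tensor weight = at::rand({8, 3, 3, 3});
//   at::Tensor y = at::convolution(input, weight, /*bias=*/c10::nullopt,
//                                  /*stride=*/{1, 1}, /*padding=*/{1, 1},
//                                  /*dilation=*/{1, 1}, /*transposed=*/false,
//                                  /*output_padding=*/{0, 0}, /*groups=*/1);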

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_overrideable_out, name, "aten::convolution_backward_overrideable")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_overrideable_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(convolution_backward_overrideable_out, schema_str, "convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<convolution_backward_overrideable_out::schema> create_convolution_backward_overrideable_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(convolution_backward_overrideable_out::name, convolution_backward_overrideable_out::overload_name)
      .typed<convolution_backward_overrideable_out::schema>();
}

// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_convolution_backward_overrideable_out_typed_handle();
    return op.call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}

// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_convolution_backward_overrideable_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_out, name, "aten::_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_out, schema_str, "_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_convolution_out::schema> create__convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convolution_out::name, _convolution_out::overload_name)
      .typed<_convolution_out::schema>();
}

// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convolution_out::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {

    static auto op = create__convolution_out_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}

// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {

    static auto op = create__convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_out, name, "aten::cudnn_affine_grid_generator")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_out, schema_str, "cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_affine_grid_generator_out::schema> create_cudnn_affine_grid_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_affine_grid_generator_out::name, cudnn_affine_grid_generator_out::overload_name)
      .typed<cudnn_affine_grid_generator_out::schema>();
}

// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_affine_grid_generator_out::call(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {

    static auto op = create_cudnn_affine_grid_generator_out_typed_handle();
    return op.call(theta, N, C, H, W, out);
}

// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_affine_grid_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {

    static auto op = create_cudnn_affine_grid_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, theta, N, C, H, W, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_backward_out, name, "aten::cudnn_batch_norm_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_batch_norm_backward_out, schema_str, "cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_batch_norm_backward_out::schema> create_cudnn_batch_norm_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_batch_norm_backward_out::name, cudnn_batch_norm_backward_out::overload_name)
      .typed<cudnn_batch_norm_backward_out::schema>();
}

// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out::call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_cudnn_batch_norm_backward_out_typed_handle();
    return op.call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
}

// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_cudnn_batch_norm_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_transpose_out, name, "aten::cudnn_convolution_transpose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_transpose_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_transpose_out, schema_str, "cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_transpose_out::schema> create_cudnn_convolution_transpose_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_transpose_out::name, cudnn_convolution_transpose_out::overload_name)
      .typed<cudnn_convolution_transpose_out::schema>();
}

// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_transpose_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {

    static auto op = create_cudnn_convolution_transpose_out_typed_handle();
    return op.call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}

// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_transpose_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {

    static auto op = create_cudnn_convolution_transpose_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_backward_out, name, "aten::cudnn_grid_sampler_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_backward_out, schema_str, "cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_grid_sampler_backward_out::schema> create_cudnn_grid_sampler_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_grid_sampler_backward_out::name, cudnn_grid_sampler_backward_out::overload_name)
      .typed<cudnn_grid_sampler_backward_out::schema>();
}

// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out::call(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_cudnn_grid_sampler_backward_out_typed_handle();
    return op.call(self, grid, grad_output, out0, out1);
}

// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_cudnn_grid_sampler_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grid, grad_output, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_out, name, "aten::_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_out, schema_str, "_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_out::schema> create__ctc_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_ctc_loss_out::name, _ctc_loss_out::overload_name)
      .typed<_ctc_loss_out::schema>();
}

// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__ctc_loss_out_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}

// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__ctc_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_Tensor_out, name, "aten::_ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_ctc_loss_Tensor_out, schema_str, "_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_Tensor_out::schema> create__ctc_loss_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_ctc_loss_Tensor_out::name, _ctc_loss_Tensor_out::overload_name)
      .typed<_ctc_loss_Tensor_out::schema>();
}

// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_Tensor_out::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__ctc_loss_Tensor_out_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}

// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__ctc_loss_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}
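
// Usage sketch (illustrative, not generated): log_probs is (T, N, C) and is
// expected to already be log-softmaxed; the Tensor_out overload differs only
// in taking the lengths as tensors rather than int lists. Shapes below are
// hypothetical:
//
//   at::Tensor log_probs = at::randn({50, 4, 20}).log_softmax(2);
//   at::Tensor targets   = at::randint(1, 20, {4, 30});
//   auto res = at::_ctc_loss(log_probs, targets,
//                            /*input_lengths=*/{50, 50, 50, 50},
//                            /*target_lengths=*/{30, 25, 20, 30});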

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_dense_backward_out, name, "aten::embedding_dense_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_dense_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_dense_backward_out, schema_str, "embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)")

// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<embedding_dense_backward_out::schema> create_embedding_dense_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_dense_backward_out::name, embedding_dense_backward_out::overload_name)
      .typed<embedding_dense_backward_out::schema>();
}

// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_dense_backward_out::call(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {

    static auto op = create_embedding_dense_backward_out_typed_handle();
    return op.call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}

// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_dense_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {

    static auto op = create_embedding_dense_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_out, name, "aten::_embedding_bag")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_out, schema_str, "_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))")

// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_out::schema> create__embedding_bag_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_out::name, _embedding_bag_out::overload_name)
      .typed<_embedding_bag_out::schema>();
}

// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {

    static auto op = create__embedding_bag_out_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}

// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {

    static auto op = create__embedding_bag_out_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_empty_out, name, "aten::new_empty")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_empty_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_empty_out, schema_str, "new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<new_empty_out::schema> create_new_empty_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_empty_out::name, new_empty_out::overload_name)
      .typed<new_empty_out::schema>();
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_empty_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create_new_empty_out_typed_handle();
    return op.call(self, size, out);
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_empty_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create_new_empty_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, out);
}
14899
14900STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Scalar_out, name, "aten::fill")
14901STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Scalar_out, overload_name, "Scalar_out")
14902STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Scalar_out, schema_str, "fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)")
14903
14904// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
14905static C10_NOINLINE c10::TypedOperatorHandle<fill_Scalar_out::schema> create_fill_Scalar_out_typed_handle() {
14906 return c10::Dispatcher::singleton()
14907 .findSchemaOrThrow(fill_Scalar_out::name, fill_Scalar_out::overload_name)
14908 .typed<fill_Scalar_out::schema>();
14909}
14910
14911// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
14912at::Tensor & fill_Scalar_out::call(const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
14913
14914 static auto op = create_fill_Scalar_out_typed_handle();
14915 return op.call(self, value, out);
14916}
14917
14918// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
14919at::Tensor & fill_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
14920
14921 static auto op = create_fill_Scalar_out_typed_handle();
14922 return op.redispatch(dispatchKeySet, self, value, out);
14923}
14924
14925STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Tensor_out, name, "aten::fill")
14926STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Tensor_out, overload_name, "Tensor_out")
14927STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fill_Tensor_out, schema_str, "fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)")
14928
14929// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
14930static C10_NOINLINE c10::TypedOperatorHandle<fill_Tensor_out::schema> create_fill_Tensor_out_typed_handle() {
14931 return c10::Dispatcher::singleton()
14932 .findSchemaOrThrow(fill_Tensor_out::name, fill_Tensor_out::overload_name)
14933 .typed<fill_Tensor_out::schema>();
14934}
14935
14936// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
14937at::Tensor & fill_Tensor_out::call(const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
14938
14939 static auto op = create_fill_Tensor_out_typed_handle();
14940 return op.call(self, value, out);
14941}
14942
14943// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
14944at::Tensor & fill_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
14945
14946 static auto op = create_fill_Tensor_out_typed_handle();
14947 return op.redispatch(dispatchKeySet, self, value, out);
14948}
14949
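// [Editorial sketch, not generated] The two `fill` blocks above are distinct
// overloads of the same "aten::fill" schema name, distinguished only by
// overload_name ("Scalar_out" vs "Tensor_out"): selection happens at
// schema-lookup time in findSchemaOrThrow, not via C++ overloading. A minimal
// illustration with a hypothetical helper name and caller-supplied tensors:
[[maybe_unused]] static at::Tensor & sketch_fill_scalar_out(const at::Tensor & self, at::Tensor & out) {
  // Fills `out` with 0.5, shaped like `self`, via the Scalar_out overload.
  return fill_Scalar_out::call(self, at::Scalar(0.5), out);
}
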
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_backward_out, name, "aten::grid_sampler_2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_2d_backward_out, schema_str, "grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_2d_backward_out::schema> create_grid_sampler_2d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_2d_backward_out::name, grid_sampler_2d_backward_out::overload_name)
      .typed<grid_sampler_2d_backward_out::schema>();
}

// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_grid_sampler_2d_backward_out_typed_handle();
    return op.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}

// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_grid_sampler_2d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl_out, name, "aten::_index_put_impl")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl_out, schema_str, "_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_index_put_impl_out::schema> create__index_put_impl_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_index_put_impl_out::name, _index_put_impl_out::overload_name)
      .typed<_index_put_impl_out::schema>();
}

// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _index_put_impl_out::call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {

    static auto op = create__index_put_impl_out_typed_handle();
    return op.call(self, indices, values, accumulate, unsafe, out);
}

// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _index_put_impl_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {

    static auto op = create__index_put_impl_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl, name, "aten::_index_put_impl")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_index_put_impl, schema_str, "_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor")

// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_index_put_impl::schema> create__index_put_impl_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_index_put_impl::name, _index_put_impl::overload_name)
      .typed<_index_put_impl::schema>();
}

// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
at::Tensor _index_put_impl::call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {

    static auto op = create__index_put_impl_typed_handle();
    return op.call(self, indices, values, accumulate, unsafe);
}

// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
at::Tensor _index_put_impl::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {

    static auto op = create__index_put_impl_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe);
}

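// [Editorial sketch, not generated] A `Tensor?[]` schema argument maps to
// c10::List<c10::optional<at::Tensor>> in C++; a nullopt entry means "take the
// whole dimension", much like `:` in Python advanced indexing. The helper name
// is hypothetical and the tensors are assumed to be supplied by the caller.
[[maybe_unused]] static at::Tensor sketch_index_put_impl(
    const at::Tensor & self, const at::Tensor & row_index, const at::Tensor & values) {
  c10::List<c10::optional<at::Tensor>> indices;
  indices.push_back(c10::optional<at::Tensor>(row_index));  // index dim 0
  indices.push_back(c10::nullopt);                          // keep dim 1 whole
  return _index_put_impl::call(self, indices, values, /*accumulate=*/false, /*unsafe=*/false);
}
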
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_out, name, "aten::mkldnn_linear")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_out, schema_str, "mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_out::schema> create_mkldnn_linear_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_out::name, mkldnn_linear_out::overload_name)
      .typed<mkldnn_linear_out::schema>();
}

// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_linear_out::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {

    static auto op = create_mkldnn_linear_out_typed_handle();
    return op.call(self, weight, bias, out);
}

// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_linear_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {

    static auto op = create_mkldnn_linear_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_weights_out, name, "aten::mkldnn_linear_backward_weights")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_weights_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_weights_out, schema_str, "mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_weights_out::schema> create_mkldnn_linear_backward_weights_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward_weights_out::name, mkldnn_linear_backward_weights_out::overload_name)
      .typed<mkldnn_linear_backward_weights_out::schema>();
}

// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_mkldnn_linear_backward_weights_out_typed_handle();
    return op.call(grad_output, input, weight, bias_defined, out0, out1);
}

// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_mkldnn_linear_backward_weights_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, weight, bias_defined, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_out, name, "aten::_aminmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_out, schema_str, "_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_aminmax_out::schema> create__aminmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_aminmax_out::name, _aminmax_out::overload_name)
      .typed<_aminmax_out::schema>();
}

// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out::call(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__aminmax_out_typed_handle();
    return op.call(self, out0, out1);
}

// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__aminmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_dim_out, name, "aten::_aminmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_dim_out, overload_name, "dim_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_aminmax_dim_out, schema_str, "_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_aminmax_dim_out::schema> create__aminmax_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_aminmax_dim_out::name, _aminmax_dim_out::overload_name)
      .typed<_aminmax_dim_out::schema>();
}

// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_dim_out::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__aminmax_dim_out_typed_handle();
    return op.call(self, dim, keepdim, out0, out1);
}

// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__aminmax_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out0, out1);
}

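// [Editorial sketch, not generated] Multi-output out= variants return a tuple of
// the very references that were passed in, so the return value can be ignored or
// unpacked; nothing new is allocated by the wrapper itself. Hypothetical helper,
// caller-supplied tensors:
[[maybe_unused]] static void sketch_aminmax_out(const at::Tensor & self, at::Tensor & min_out, at::Tensor & max_out) {
  // The returned tuple aliases min_out/max_out, so discarding it loses nothing.
  auto result = _aminmax_out::call(self, min_out, max_out);
  (void)result;
}
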
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool3d_backward_out, name, "aten::mkldnn_max_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool3d_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool3d_backward_out, schema_str, "mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool3d_backward_out::schema> create_mkldnn_max_pool3d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool3d_backward_out::name, mkldnn_max_pool3d_backward_out::overload_name)
      .typed<mkldnn_max_pool3d_backward_out::schema>();
}

// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool3d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create_mkldnn_max_pool3d_backward_out_typed_handle();
    return op.call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool3d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create_mkldnn_max_pool3d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool1d_out, name, "aten::quantized_max_pool1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool1d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool1d_out, schema_str, "quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool1d_out::schema> create_quantized_max_pool1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool1d_out::name, quantized_max_pool1d_out::overload_name)
      .typed<quantized_max_pool1d_out::schema>();
}

// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantized_max_pool1d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create_quantized_max_pool1d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantized_max_pool1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create_quantized_max_pool1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_convolution_out, name, "aten::mkldnn_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_convolution_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_convolution_out, schema_str, "mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_convolution_out::schema> create_mkldnn_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_convolution_out::name, mkldnn_convolution_out::overload_name)
      .typed<mkldnn_convolution_out::schema>();
}

// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {

    static auto op = create_mkldnn_convolution_out_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups, out);
}

// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {

    static auto op = create_mkldnn_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out);
}

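// [Editorial sketch, not generated] `SymInt[]` parameters exist so that shapes
// can stay symbolic under torch.compile; from eager C++ they are most simply
// built from concrete integers. Hypothetical helper, caller-supplied tensors:
[[maybe_unused]] static at::Tensor & sketch_mkldnn_convolution_out(
    const at::Tensor & self, const at::Tensor & weight, at::Tensor & out) {
  // Concrete SymInts wrap plain int64_t values.
  std::vector<c10::SymInt> padding = {c10::SymInt(1), c10::SymInt(1)};
  std::vector<int64_t> stride = {1, 1};
  std::vector<int64_t> dilation = {1, 1};
  return mkldnn_convolution_out::call(self, weight, /*bias=*/c10::nullopt,
      c10::SymIntArrayRef(padding), stride, dilation, /*groups=*/1, out);
}
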
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_backward_out, name, "aten::miopen_batch_norm_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_batch_norm_backward_out, schema_str, "miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<miopen_batch_norm_backward_out::schema> create_miopen_batch_norm_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_batch_norm_backward_out::name, miopen_batch_norm_backward_out::overload_name)
      .typed<miopen_batch_norm_backward_out::schema>();
}

// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out::call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_miopen_batch_norm_backward_out_typed_handle();
    return op.call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
}

// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_miopen_batch_norm_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Scalar_out, name, "aten::mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mul_Scalar_out, schema_str, "mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mul_Scalar_out::schema> create_mul_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul_Scalar_out::name, mul_Scalar_out::overload_name)
      .typed<mul_Scalar_out::schema>();
}

// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mul_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_mul_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mul_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_mul_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

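// [Editorial note, not emitted by torchgen] `redispatch` is the entry point for
// kernels that already hold a DispatchKeySet (autograd wrappers, tracing, etc.)
// and want to forward to the next backend without restarting dispatch from the
// top. A hedged sketch of the usual idiom, masking out autograd keys before
// forwarding; the helper name is hypothetical:
[[maybe_unused]] static at::Tensor & sketch_redispatch_below_autograd(
    c10::DispatchKeySet ks, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // Everything at or above autograd has already run; continue below it.
  return mul_Scalar_out::redispatch(ks & c10::after_autograd_keyset, self, other, out);
}
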
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_elemt_out, name, "aten::batch_norm_backward_elemt")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_elemt_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_elemt_out, schema_str, "batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)")

// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward_elemt_out::schema> create_batch_norm_backward_elemt_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_backward_elemt_out::name, batch_norm_backward_elemt_out::overload_name)
      .typed<batch_norm_backward_elemt_out::schema>();
}

// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & batch_norm_backward_elemt_out::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count, at::Tensor & out) {

    static auto op = create_batch_norm_backward_elemt_out_typed_handle();
    return op.call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
}

// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & batch_norm_backward_elemt_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count, at::Tensor & out) {

    static auto op = create_batch_norm_backward_elemt_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_unshuffle_out, name, "aten::pixel_unshuffle")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_unshuffle_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_unshuffle_out, schema_str, "pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)")

// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pixel_unshuffle_out::schema> create_pixel_unshuffle_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pixel_unshuffle_out::name, pixel_unshuffle_out::overload_name)
      .typed<pixel_unshuffle_out::schema>();
}

// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pixel_unshuffle_out::call(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {

    static auto op = create_pixel_unshuffle_out_typed_handle();
    return op.call(self, downscale_factor, out);
}

// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pixel_unshuffle_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {

    static auto op = create_pixel_unshuffle_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, downscale_factor, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pin_memory_out, name, "aten::_pin_memory")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pin_memory_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pin_memory_out, schema_str, "_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_pin_memory_out::schema> create__pin_memory_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pin_memory_out::name, _pin_memory_out::overload_name)
      .typed<_pin_memory_out::schema>();
}

// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pin_memory_out::call(const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out) {

    static auto op = create__pin_memory_out_typed_handle();
    return op.call(self, device, out);
}

// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pin_memory_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out) {

    static auto op = create__pin_memory_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, device, out);
}

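// [Editorial sketch, not generated] `Device?` maps to c10::optional<at::Device>
// passed by value; nullopt leaves the device choice to the backend kernel.
// Hypothetical helper, caller-supplied tensors:
[[maybe_unused]] static at::Tensor & sketch_pin_memory_out(const at::Tensor & self, at::Tensor & out) {
  // Explicitly request pinning for a CUDA device; at::Device converts from kCUDA.
  return _pin_memory_out::call(self, c10::optional<at::Device>(at::kCUDA), out);
}
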
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_names_out, name, "aten::randn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_names_out, overload_name, "names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_names_out, schema_str, "randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)")

// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randn_names_out::schema> create_randn_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_names_out::name, randn_names_out::overload_name)
      .typed<randn_names_out::schema>();
}

// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_names_out::call(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {

    static auto op = create_randn_names_out_typed_handle();
    return op.call(size, names, out);
}

// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {

    static auto op = create_randn_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_with_names_out, name, "aten::randn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_with_names_out, overload_name, "generator_with_names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randn_generator_with_names_out, schema_str, "randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)")

// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randn_generator_with_names_out::schema> create_randn_generator_with_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_generator_with_names_out::name, randn_generator_with_names_out::overload_name)
      .typed<randn_generator_with_names_out::schema>();
}

// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_generator_with_names_out::call(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {

    static auto op = create_randn_generator_with_names_out_typed_handle();
    return op.call(size, generator, names, out);
}

// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_generator_with_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {

    static auto op = create_randn_generator_with_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, names, out);
}

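// [Editorial sketch, not generated] `Dimname[]?` arrives as
// c10::optional<at::DimnameList>; names are built from interned dimname
// symbols. Hypothetical helper, caller-supplied out tensor:
[[maybe_unused]] static at::Tensor & sketch_randn_names_out(at::Tensor & out) {
  std::vector<at::Dimname> names = {
      at::Dimname::fromSymbol(c10::Symbol::dimname("N")),
      at::Dimname::fromSymbol(c10::Symbol::dimname("C"))};
  std::vector<c10::SymInt> size = {c10::SymInt(2), c10::SymInt(3)};
  return randn_names_out::call(c10::SymIntArrayRef(size), at::DimnameList(names), out);
}
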
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_scatter_out, name, "aten::diagonal_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_scatter_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_scatter_out, schema_str, "diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_scatter_out::schema> create_diagonal_scatter_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_scatter_out::name, diagonal_scatter_out::overload_name)
      .typed<diagonal_scatter_out::schema>();
}

// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_scatter_out::call(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {

    static auto op = create_diagonal_scatter_out_typed_handle();
    return op.call(self, src, offset, dim1, dim2, out);
}

// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {

    static auto op = create_diagonal_scatter_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, offset, dim1, dim2, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_scatter_out, name, "aten::as_strided_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_scatter_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_scatter_out, schema_str, "as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_scatter_out::schema> create_as_strided_scatter_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_scatter_out::name, as_strided_scatter_out::overload_name)
      .typed<as_strided_scatter_out::schema>();
}

// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & as_strided_scatter_out::call(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {

    static auto op = create_as_strided_scatter_out_typed_handle();
    return op.call(self, src, size, stride, storage_offset, out);
}

// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & as_strided_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {

    static auto op = create_as_strided_scatter_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, size, stride, storage_offset, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transform_bias_rescale_qkv_out, name, "aten::_transform_bias_rescale_qkv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transform_bias_rescale_qkv_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transform_bias_rescale_qkv_out, schema_str, "_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_transform_bias_rescale_qkv_out::schema> create__transform_bias_rescale_qkv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_transform_bias_rescale_qkv_out::name, _transform_bias_rescale_qkv_out::overload_name)
      .typed<_transform_bias_rescale_qkv_out::schema>();
}

// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out::call(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__transform_bias_rescale_qkv_out_typed_handle();
    return op.call(qkv, qkv_bias, num_heads, out0, out1, out2);
}

// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__transform_bias_rescale_qkv_out_typed_handle();
    return op.redispatch(dispatchKeySet, qkv, qkv_bias, num_heads, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique_out, name, "aten::_unique")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique_out, schema_str, "_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_unique_out::schema> create__unique_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unique_out::name, _unique_out::overload_name)
      .typed<_unique_out::schema>();
}

// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _unique_out::call(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__unique_out_typed_handle();
    return op.call(self, sorted, return_inverse, out0, out1);
}

// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _unique_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__unique_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, sorted, return_inverse, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_out, name, "aten::_weight_norm_interface")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_out, schema_str, "_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_interface_out::schema> create__weight_norm_interface_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_weight_norm_interface_out::name, _weight_norm_interface_out::overload_name)
      .typed<_weight_norm_interface_out::schema>();
}

// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out::call(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__weight_norm_interface_out_typed_handle();
    return op.call(v, g, dim, out0, out1);
}

// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__weight_norm_interface_out_typed_handle();
    return op.redispatch(dispatchKeySet, v, g, dim, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_names_out, name, "aten::zeros")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_names_out, overload_name, "names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_names_out, schema_str, "zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)")

// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<zeros_names_out::schema> create_zeros_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zeros_names_out::name, zeros_names_out::overload_name)
      .typed<zeros_names_out::schema>();
}

// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_names_out::call(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {

    static auto op = create_zeros_names_out_typed_handle();
    return op.call(size, names, out);
}

// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {

    static auto op = create_zeros_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_standard_gamma_out, name, "aten::_standard_gamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_standard_gamma_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_standard_gamma_out, schema_str, "_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_standard_gamma_out::schema> create__standard_gamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_standard_gamma_out::name, _standard_gamma_out::overload_name)
      .typed<_standard_gamma_out::schema>();
}

// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _standard_gamma_out::call(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create__standard_gamma_out_typed_handle();
    return op.call(self, generator, out);
}

// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _standard_gamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create__standard_gamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sample_dirichlet_out, name, "aten::_sample_dirichlet")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sample_dirichlet_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sample_dirichlet_out, schema_str, "_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sample_dirichlet_out::schema> create__sample_dirichlet_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sample_dirichlet_out::name, _sample_dirichlet_out::overload_name)
      .typed<_sample_dirichlet_out::schema>();
}

// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sample_dirichlet_out::call(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create__sample_dirichlet_out_typed_handle();
    return op.call(self, generator, out);
}

// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sample_dirichlet_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create__sample_dirichlet_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binomial_out, name, "aten::binomial")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binomial_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binomial_out, schema_str, "binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<binomial_out::schema> create_binomial_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(binomial_out::name, binomial_out::overload_name)
      .typed<binomial_out::schema>();
}

// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binomial_out::call(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_binomial_out_typed_handle();
    return op.call(count, prob, generator, out);
}

// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binomial_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_binomial_out_typed_handle();
    return op.redispatch(dispatchKeySet, count, prob, generator, out);
}

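// [Editorial sketch, not generated] The sampling ops above (_standard_gamma,
// _sample_dirichlet, binomial) all take `Generator?`; passing nullopt falls
// back to the global RNG for the tensor's device. Hypothetical helper,
// caller-supplied tensors:
[[maybe_unused]] static at::Tensor & sketch_binomial_out(
    const at::Tensor & count, const at::Tensor & prob, at::Tensor & out) {
  return binomial_out::call(count, prob, /*generator=*/c10::nullopt, out);
}
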
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim_out, name, "aten::_sparse_sum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim_out, overload_name, "dim_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_sum_dim_out, schema_str, "_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_dim_out::schema> create__sparse_sum_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_sum_dim_out::name, _sparse_sum_dim_out::overload_name)
      .typed<_sparse_sum_dim_out::schema>();
}

// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sum_dim_out::call(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {

    static auto op = create__sparse_sum_dim_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sum_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {

    static auto op = create__sparse_sum_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_addmm_out, name, "aten::_sparse_addmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_addmm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_addmm_out, schema_str, "_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_addmm_out::schema> create__sparse_addmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_addmm_out::name, _sparse_addmm_out::overload_name)
      .typed<_sparse_addmm_out::schema>();
}

// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_addmm_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create__sparse_addmm_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, out);
}

// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_addmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create__sparse_addmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_out, name, "aten::sparse_resize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize_out, schema_str, "sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_out::schema> create_sparse_resize_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_out::name, sparse_resize_out::overload_name)
      .typed<sparse_resize_out::schema>();
}

// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & sparse_resize_out::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {

    static auto op = create_sparse_resize_out_typed_handle();
    return op.call(self, size, sparse_dim, dense_dim, out);
}

// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & sparse_resize_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {

    static auto op = create_sparse_resize_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out);
}

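// Note on the sparse_resize.out stub above: unlike the at::Tensor&-returning
// out stubs elsewhere in this file, its `out` parameter and return type are
// `const at::Tensor &`. torchgen treats resize-style operators this way;
// because at::Tensor is a shared handle, the kernel can still resize the
// underlying TensorImpl through a const reference, even though the schema
// marks the argument Tensor(a!).
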
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize, name, "aten::sparse_resize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_resize, schema_str, "sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor")

// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize::schema> create_sparse_resize_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize::name, sparse_resize::overload_name)
      .typed<sparse_resize::schema>();
}

// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
at::Tensor sparse_resize::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {

    static auto op = create_sparse_resize_typed_handle();
    return op.call(self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
at::Tensor sparse_resize::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {

    static auto op = create_sparse_resize_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_mask_out, name, "aten::sparse_mask")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_mask_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_mask_out, schema_str, "sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sparse_mask_out::schema> create_sparse_mask_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_mask_out::name, sparse_mask_out::overload_name)
      .typed<sparse_mask_out::schema>();
}

// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sparse_mask_out::call(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {

    static auto op = create_sparse_mask_out_typed_handle();
    return op.call(self, mask, out);
}

// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sparse_mask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {

    static auto op = create_sparse_mask_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse_out, name, "aten::copy_sparse_to_sparse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse_out, schema_str, "copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copy_sparse_to_sparse_out::schema> create_copy_sparse_to_sparse_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_sparse_to_sparse_out::name, copy_sparse_to_sparse_out::overload_name)
      .typed<copy_sparse_to_sparse_out::schema>();
}

// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copy_sparse_to_sparse_out::call(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {

    static auto op = create_copy_sparse_to_sparse_out_typed_handle();
    return op.call(self, src, non_blocking, out);
}

// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copy_sparse_to_sparse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {

    static auto op = create_copy_sparse_to_sparse_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse, name, "aten::copy_sparse_to_sparse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_sparse_to_sparse, schema_str, "copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor")

// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<copy_sparse_to_sparse::schema> create_copy_sparse_to_sparse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_sparse_to_sparse::name, copy_sparse_to_sparse::overload_name)
      .typed<copy_sparse_to_sparse::schema>();
}

// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
at::Tensor copy_sparse_to_sparse::call(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {

    static auto op = create_copy_sparse_to_sparse_typed_handle();
    return op.call(self, src, non_blocking);
}

// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
at::Tensor copy_sparse_to_sparse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking) {

    static auto op = create_copy_sparse_to_sparse_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_sparse_dim_out, name, "aten::to_sparse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_sparse_dim_out, overload_name, "sparse_dim_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_sparse_dim_out, schema_str, "to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_sparse_dim_out::schema> create_to_sparse_sparse_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_sparse_dim_out::name, to_sparse_sparse_dim_out::overload_name)
      .typed<to_sparse_sparse_dim_out::schema>();
}

// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_sparse_dim_out::call(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {

    static auto op = create_to_sparse_sparse_dim_out_typed_handle();
    return op.call(self, sparse_dim, out);
}

// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_sparse_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {

    static auto op = create_to_sparse_sparse_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, sparse_dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_out, name, "aten::to_sparse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_out, schema_str, "to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)")

// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_out::schema> create_to_sparse_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_out::name, to_sparse_out::overload_name)
      .typed<to_sparse_out::schema>();
}

// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_out::call(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {

    static auto op = create_to_sparse_out_typed_handle();
    return op.call(self, layout, blocksize, dense_dim, out);
}

// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {

    static auto op = create_to_sparse_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, layout, blocksize, dense_dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn_out, name, "aten::to_mkldnn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_mkldnn_out, schema_str, "to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<to_mkldnn_out::schema> create_to_mkldnn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_mkldnn_out::name, to_mkldnn_out::overload_name)
      .typed<to_mkldnn_out::schema>();
}

// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_mkldnn_out::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_to_mkldnn_out_typed_handle();
    return op.call(self, dtype, out);
}

// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_mkldnn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_to_mkldnn_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(int_repr_out, name, "aten::int_repr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(int_repr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(int_repr_out, schema_str, "int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<int_repr_out::schema> create_int_repr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(int_repr_out::name, int_repr_out::overload_name)
      .typed<int_repr_out::schema>();
}

// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & int_repr_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_int_repr_out_typed_handle();
    return op.call(self, out);
}

// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & int_repr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_int_repr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask_out, name, "aten::fake_quantize_per_channel_affine_cachemask")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_channel_affine_cachemask_out, schema_str, "fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_channel_affine_cachemask_out::schema> create_fake_quantize_per_channel_affine_cachemask_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_channel_affine_cachemask_out::name, fake_quantize_per_channel_affine_cachemask_out::overload_name)
      .typed<fake_quantize_per_channel_affine_cachemask_out::schema>();
}

// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_fake_quantize_per_channel_affine_cachemask_out_typed_handle();
    return op.call(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
}

// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_fake_quantize_per_channel_affine_cachemask_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper_out, name, "aten::_fused_moving_avg_obs_fq_helper")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper_out, schema_str, "_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))")

// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
static C10_NOINLINE c10::TypedOperatorHandle<_fused_moving_avg_obs_fq_helper_out::schema> create__fused_moving_avg_obs_fq_helper_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_moving_avg_obs_fq_helper_out::name, _fused_moving_avg_obs_fq_helper_out::overload_name)
      .typed<_fused_moving_avg_obs_fq_helper_out::schema>();
}

// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out::call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__fused_moving_avg_obs_fq_helper_out_typed_handle();
    return op.call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
}

// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__fused_moving_avg_obs_fq_helper_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
}

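// The _functional variant below is the functionalization-friendly counterpart
// of the _fused_moving_avg_obs_fq_helper.out stub above: instead of mutating
// running_min/running_max/scale/zero_point in place (the Tensor(a!)..(d!)
// annotations), it takes them as plain inputs and returns the updated values
// (running_min_out, ..., zero_point_out) in the result tuple.
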
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper_functional, name, "aten::_fused_moving_avg_obs_fq_helper_functional")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper_functional, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_moving_avg_obs_fq_helper_functional, schema_str, "_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)")

// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_moving_avg_obs_fq_helper_functional::schema> create__fused_moving_avg_obs_fq_helper_functional_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_moving_avg_obs_fq_helper_functional::name, _fused_moving_avg_obs_fq_helper_functional::overload_name)
      .typed<_fused_moving_avg_obs_fq_helper_functional::schema>();
}

// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional::call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {

    static auto op = create__fused_moving_avg_obs_fq_helper_functional_typed_handle();
    return op.call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {

    static auto op = create__fused_moving_avg_obs_fq_helper_functional_typed_handle();
    return op.redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_copy_out, name, "aten::_to_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_to_copy_out, schema_str, "_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_to_copy_out::schema> create__to_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_copy_out::name, _to_copy_out::overload_name)
      .typed<_to_copy_out::schema>();
}

// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_copy_out::call(const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create__to_copy_out_typed_handle();
    return op.call(self, non_blocking, memory_format, out);
}

// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create__to_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_out, name, "aten::lift")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_out, schema_str, "lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lift_out::schema> create_lift_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lift_out::name, lift_out::overload_name)
      .typed<lift_out::schema>();
}

// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lift_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_lift_out_typed_handle();
    return op.call(self, out);
}

// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lift_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_lift_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_Tensor_out, name, "aten::bitwise_and")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_Tensor_out, overload_name, "Scalar_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_and_Scalar_Tensor_out, schema_str, "bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Scalar_Tensor_out::schema> create_bitwise_and_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_and_Scalar_Tensor_out::name, bitwise_and_Scalar_Tensor_out::overload_name)
      .typed<bitwise_and_Scalar_Tensor_out::schema>();
}

// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_and_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_and_Scalar_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_and_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_and_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_Tensor_out, name, "aten::bitwise_xor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_Tensor_out, overload_name, "Scalar_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_xor_Scalar_Tensor_out, schema_str, "bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Scalar_Tensor_out::schema> create_bitwise_xor_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_xor_Scalar_Tensor_out::name, bitwise_xor_Scalar_Tensor_out::overload_name)
      .typed<bitwise_xor_Scalar_Tensor_out::schema>();
}

// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_xor_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_xor_Scalar_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_xor_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_xor_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Scalar_out, name, "aten::__lshift__")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Scalar_out, schema_str, "__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__lshift___Scalar_out::schema> create___lshift___Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__lshift___Scalar_out::name, __lshift___Scalar_out::overload_name)
      .typed<__lshift___Scalar_out::schema>();
}

// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & __lshift___Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create___lshift___Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & __lshift___Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create___lshift___Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Tensor_out, name, "aten::__lshift__")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__lshift___Tensor_out, schema_str, "__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__lshift___Tensor_out::schema> create___lshift___Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__lshift___Tensor_out::name, __lshift___Tensor_out::overload_name)
      .typed<__lshift___Tensor_out::schema>();
}

// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & __lshift___Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create___lshift___Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & __lshift___Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create___lshift___Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Scalar_Tensor_out, name, "aten::bitwise_left_shift")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Scalar_Tensor_out, overload_name, "Scalar_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_left_shift_Scalar_Tensor_out, schema_str, "bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Scalar_Tensor_out::schema> create_bitwise_left_shift_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_left_shift_Scalar_Tensor_out::name, bitwise_left_shift_Scalar_Tensor_out::overload_name)
      .typed<bitwise_left_shift_Scalar_Tensor_out::schema>();
}

// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_left_shift_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_left_shift_Scalar_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_left_shift_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_left_shift_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Scalar_out, name, "aten::__rshift__")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Scalar_out, schema_str, "__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__rshift___Scalar_out::schema> create___rshift___Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__rshift___Scalar_out::name, __rshift___Scalar_out::overload_name)
      .typed<__rshift___Scalar_out::schema>();
}

// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & __rshift___Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create___rshift___Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & __rshift___Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create___rshift___Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Tensor_out, name, "aten::__rshift__")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__rshift___Tensor_out, schema_str, "__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__rshift___Tensor_out::schema> create___rshift___Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__rshift___Tensor_out::name, __rshift___Tensor_out::overload_name)
      .typed<__rshift___Tensor_out::schema>();
}

// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & __rshift___Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create___rshift___Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & __rshift___Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create___rshift___Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Scalar_Tensor_out, name, "aten::bitwise_right_shift")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Scalar_Tensor_out, overload_name, "Scalar_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_right_shift_Scalar_Tensor_out, schema_str, "bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Scalar_Tensor_out::schema> create_bitwise_right_shift_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Scalar_Tensor_out::name, bitwise_right_shift_Scalar_Tensor_out::overload_name)
      .typed<bitwise_right_shift_Scalar_Tensor_out::schema>();
}

// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_right_shift_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_right_shift_Scalar_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_right_shift_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_bitwise_right_shift_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential_out, name, "aten::exponential")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential_out, schema_str, "exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<exponential_out::schema> create_exponential_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exponential_out::name, exponential_out::overload_name)
      .typed<exponential_out::schema>();
}

// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exponential_out::call(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_exponential_out_typed_handle();
    return op.call(self, lambd, generator, out);
}

// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exponential_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_exponential_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential, name, "aten::exponential")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(exponential, schema_str, "exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor")

// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<exponential::schema> create_exponential_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exponential::name, exponential::overload_name)
      .typed<exponential::schema>();
}

// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor
at::Tensor exponential::call(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {

    static auto op = create_exponential_typed_handle();
    return op.call(self, lambd, generator);
}

// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor
at::Tensor exponential::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {

    static auto op = create_exponential_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, generator);
}

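// Note: sampling stubs such as exponential and geometric simply thread the
// c10::optional<at::Generator> through call/redispatch unchanged; passing
// c10::nullopt leaves it to the kernel to fall back to the default generator
// for the tensor's device.
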
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric_out, name, "aten::geometric")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric_out, schema_str, "geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<geometric_out::schema> create_geometric_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(geometric_out::name, geometric_out::overload_name)
      .typed<geometric_out::schema>();
}

// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & geometric_out::call(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_geometric_out_typed_handle();
    return op.call(self, p, generator, out);
}

// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & geometric_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_geometric_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric, name, "aten::geometric")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(geometric, schema_str, "geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor")

// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<geometric::schema> create_geometric_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(geometric::name, geometric::overload_name)
      .typed<geometric::schema>();
}

// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor
at::Tensor geometric::call(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {

    static auto op = create_geometric_typed_handle();
    return op.call(self, p, generator);
}

// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor
at::Tensor geometric::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator) {

    static auto op = create_geometric_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale_out, name, "aten::_amp_foreach_non_finite_check_and_unscale")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale_out, schema_str, "_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()")

// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_amp_foreach_non_finite_check_and_unscale_out::schema> create__amp_foreach_non_finite_check_and_unscale_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_amp_foreach_non_finite_check_and_unscale_out::name, _amp_foreach_non_finite_check_and_unscale_out::overload_name)
      .typed<_amp_foreach_non_finite_check_and_unscale_out::schema>();
}

// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
void _amp_foreach_non_finite_check_and_unscale_out::call(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {

    static auto op = create__amp_foreach_non_finite_check_and_unscale_out_typed_handle();
    return op.call(self, found_inf, inv_scale, out);
}

// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
void _amp_foreach_non_finite_check_and_unscale_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {

    static auto op = create__amp_foreach_non_finite_check_and_unscale_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, found_inf, inv_scale, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale, name, "aten::_amp_foreach_non_finite_check_and_unscale")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_amp_foreach_non_finite_check_and_unscale, schema_str, "_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)")

// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
static C10_NOINLINE c10::TypedOperatorHandle<_amp_foreach_non_finite_check_and_unscale::schema> create__amp_foreach_non_finite_check_and_unscale_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_amp_foreach_non_finite_check_and_unscale::name, _amp_foreach_non_finite_check_and_unscale::overload_name)
      .typed<_amp_foreach_non_finite_check_and_unscale::schema>();
}

// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale::call(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {

    static auto op = create__amp_foreach_non_finite_check_and_unscale_typed_handle();
    return op.call(self, found_inf, inv_scale);
}

// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {

    static auto op = create__amp_foreach_non_finite_check_and_unscale_typed_handle();
    return op.redispatch(dispatchKeySet, self, found_inf, inv_scale);
}

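// Note: the _foreach_*.out stubs below return void because their schemas
// return `()`; results are written into the caller-provided `out` TensorList.
// Compare _amp_foreach_non_finite_check_and_unscale above, whose functional
// schema instead returns the updated tensors by value.
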
16350STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_Scalar_out, name, "aten::_foreach_add")
16351STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_Scalar_out, overload_name, "Scalar_out")
16352STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_Scalar_out, schema_str, "_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
16353
16354// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
16355static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_Scalar_out::schema> create__foreach_add_Scalar_out_typed_handle() {
16356 return c10::Dispatcher::singleton()
16357 .findSchemaOrThrow(_foreach_add_Scalar_out::name, _foreach_add_Scalar_out::overload_name)
16358 .typed<_foreach_add_Scalar_out::schema>();
16359}
16360
16361// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
16362void _foreach_add_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
16363
16364 static auto op = create__foreach_add_Scalar_out_typed_handle();
16365 return op.call(self, scalar, out);
16366}
16367
16368// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
16369void _foreach_add_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
16370
16371 static auto op = create__foreach_add_Scalar_out_typed_handle();
16372 return op.redispatch(dispatchKeySet, self, scalar, out);
16373}
16374
16375STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_Scalar_out, name, "aten::_foreach_clamp_min")
16376STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_Scalar_out, overload_name, "Scalar_out")
16377STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_Scalar_out, schema_str, "_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
16378
16379// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
16380static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_Scalar_out::schema> create__foreach_clamp_min_Scalar_out_typed_handle() {
16381 return c10::Dispatcher::singleton()
16382 .findSchemaOrThrow(_foreach_clamp_min_Scalar_out::name, _foreach_clamp_min_Scalar_out::overload_name)
16383 .typed<_foreach_clamp_min_Scalar_out::schema>();
16384}
16385
16386// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
16387void _foreach_clamp_min_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
16388
16389 static auto op = create__foreach_clamp_min_Scalar_out_typed_handle();
16390 return op.call(self, scalar, out);
16391}
16392
16393// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
16394void _foreach_clamp_min_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
16395
16396 static auto op = create__foreach_clamp_min_Scalar_out_typed_handle();
16397 return op.redispatch(dispatchKeySet, self, scalar, out);
16398}
16399
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_Scalar_out, name, "aten::_foreach_minimum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_Scalar_out, schema_str, "_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_Scalar_out::schema> create__foreach_minimum_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_Scalar_out::name, _foreach_minimum_Scalar_out::overload_name)
      .typed<_foreach_minimum_Scalar_out::schema>();
}

// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_minimum_Scalar_out_typed_handle();
    return op.call(self, scalar, out);
}

// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_minimum_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_List_out, name, "aten::_foreach_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_List_out, schema_str, "_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()")

// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_List_out::schema> create__foreach_add_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_add_List_out::name, _foreach_add_List_out::overload_name)
      .typed<_foreach_add_List_out::schema>();
}

// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_add_List_out::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {

    static auto op = create__foreach_add_List_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_add_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {

    static auto op = create__foreach_add_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

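// Illustrative sketch (the helper name is hypothetical): in the List_out
// overload the two lists are paired elementwise and `alpha` scales `other`,
// mirroring at::add -- here outs[i] = xs[i] + 2.0 * ys[i]:
static inline void example_call_foreach_add_list_out(
        at::TensorList xs, at::TensorList ys, at::TensorList outs) {
    _foreach_add_List_out::call(xs, ys, /*alpha=*/2.0, outs);
}
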
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_List_out, name, "aten::_foreach_clamp_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_List_out, schema_str, "_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_List_out::schema> create__foreach_clamp_min_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_min_List_out::name, _foreach_clamp_min_List_out::overload_name)
      .typed<_foreach_clamp_min_List_out::schema>();
}

// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_clamp_min_List_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_clamp_min_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_List_out, name, "aten::_foreach_minimum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_List_out, schema_str, "_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_List_out::schema> create__foreach_minimum_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_List_out::name, _foreach_minimum_List_out::overload_name)
      .typed<_foreach_minimum_List_out::schema>();
}

// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_minimum_List_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_minimum_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_ScalarList_out, name, "aten::_foreach_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_add_ScalarList_out, schema_str, "_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_ScalarList_out::schema> create__foreach_add_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_add_ScalarList_out::name, _foreach_add_ScalarList_out::overload_name)
      .typed<_foreach_add_ScalarList_out::schema>();
}

// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_add_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_add_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_add_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_add_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

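// Illustrative sketch (hypothetical helper): the ScalarList_out overloads take
// one Scalar per tensor; a std::vector<at::Scalar> converts implicitly to
// at::ArrayRef<at::Scalar>, and its length must match xs and outs.
static inline void example_call_foreach_add_scalarlist_out(
        at::TensorList xs, at::TensorList outs) {
    std::vector<at::Scalar> scalars(xs.size(), at::Scalar(1.0));
    _foreach_add_ScalarList_out::call(xs, scalars, outs);
}
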
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_ScalarList_out, name, "aten::_foreach_clamp_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_clamp_min_ScalarList_out, schema_str, "_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_ScalarList_out::schema> create__foreach_clamp_min_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_min_ScalarList_out::name, _foreach_clamp_min_ScalarList_out::overload_name)
      .typed<_foreach_clamp_min_ScalarList_out::schema>();
}

// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_clamp_min_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_clamp_min_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_ScalarList_out, name, "aten::_foreach_minimum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_minimum_ScalarList_out, schema_str, "_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_ScalarList_out::schema> create__foreach_minimum_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_ScalarList_out::name, _foreach_minimum_ScalarList_out::overload_name)
      .typed<_foreach_minimum_ScalarList_out::schema>();
}

// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_minimum_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_minimum_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh_out, name, "aten::_foreach_cosh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cosh_out, schema_str, "_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cosh_out::schema> create__foreach_cosh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cosh_out::name, _foreach_cosh_out::overload_name)
      .typed<_foreach_cosh_out::schema>();
}

// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_cosh_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_cosh_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_cosh_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_cosh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

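// Illustrative sketch (hypothetical helper): redispatch() is meant for code
// that is already executing inside a kernel. The usual pattern, as in the
// generated autograd kernels, masks the key set so that the next backend
// below the current dispatch key handles the op:
static inline void example_redispatch_foreach_cosh_out(
        c10::DispatchKeySet ks, at::TensorList self, at::TensorList out) {
    _foreach_cosh_out::redispatch(ks & c10::after_autograd_keyset, self, out);
}
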
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc_out, name, "aten::_foreach_erfc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erfc_out, schema_str, "_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erfc_out::schema> create__foreach_erfc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erfc_out::name, _foreach_erfc_out::overload_name)
      .typed<_foreach_erfc_out::schema>();
}

// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_erfc_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_erfc_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_erfc_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_erfc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round_out, name, "aten::_foreach_round")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_round_out, schema_str, "_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_round_out::schema> create__foreach_round_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_round_out::name, _foreach_round_out::overload_name)
      .typed<_foreach_round_out::schema>();
}

// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_round_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_round_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_round_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_round_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma_out, name, "aten::_foreach_lgamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lgamma_out, schema_str, "_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lgamma_out::schema> create__foreach_lgamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lgamma_out::name, _foreach_lgamma_out::overload_name)
      .typed<_foreach_lgamma_out::schema>();
}

// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_lgamma_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_lgamma_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_lgamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_lgamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac_out, name, "aten::_foreach_frac")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_frac_out, schema_str, "_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_frac_out::schema> create__foreach_frac_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_frac_out::name, _foreach_frac_out::overload_name)
      .typed<_foreach_frac_out::schema>();
}

// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_frac_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_frac_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_frac_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_frac_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc_out, name, "aten::_foreach_trunc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_trunc_out, schema_str, "_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_trunc_out::schema> create__foreach_trunc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_trunc_out::name, _foreach_trunc_out::overload_name)
      .typed<_foreach_trunc_out::schema>();
}

// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_trunc_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_trunc_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_trunc_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_trunc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_List_out, name, "aten::_foreach_lerp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_List_out, schema_str, "_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_List_out::schema> create__foreach_lerp_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_List_out::name, _foreach_lerp_List_out::overload_name)
      .typed<_foreach_lerp_List_out::schema>();
}

// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_List_out::call(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {

    static auto op = create__foreach_lerp_List_out_typed_handle();
    return op.call(self, tensors1, weights, out);
}

// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {

    static auto op = create__foreach_lerp_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weights, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_Scalar_out, name, "aten::_foreach_lerp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_lerp_Scalar_out, schema_str, "_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_Scalar_out::schema> create__foreach_lerp_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_Scalar_out::name, _foreach_lerp_Scalar_out::overload_name)
      .typed<_foreach_lerp_Scalar_out::schema>();
}

// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_Scalar_out::call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {

    static auto op = create__foreach_lerp_Scalar_out_typed_handle();
    return op.call(self, tensors1, weight, out);
}

// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {

    static auto op = create__foreach_lerp_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weight, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_with_noise_backward_out, name, "aten::rrelu_with_noise_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_with_noise_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rrelu_with_noise_backward_out, schema_str, "rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)")

// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_backward_out::schema> create_rrelu_with_noise_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_with_noise_backward_out::name, rrelu_with_noise_backward_out::overload_name)
      .typed<rrelu_with_noise_backward_out::schema>();
}

// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rrelu_with_noise_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) {

    static auto op = create_rrelu_with_noise_backward_out_typed_handle();
    return op.call(grad_output, self, noise, lower, upper, training, self_is_result, out);
}

// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rrelu_with_noise_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) {

    static auto op = create_rrelu_with_noise_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_backward_out, name, "aten::mkldnn_adaptive_avg_pool2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_adaptive_avg_pool2d_backward_out, schema_str, "mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_adaptive_avg_pool2d_backward_out::schema> create_mkldnn_adaptive_avg_pool2d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_adaptive_avg_pool2d_backward_out::name, mkldnn_adaptive_avg_pool2d_backward_out::overload_name)
      .typed<mkldnn_adaptive_avg_pool2d_backward_out::schema>();
}

// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_mkldnn_adaptive_avg_pool2d_backward_out_typed_handle();
    return op.call(grad_output, self, out);
}

// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_mkldnn_adaptive_avg_pool2d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_exp_out, name, "aten::linalg_matrix_exp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_exp_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_exp_out, schema_str, "linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_exp_out::schema> create_linalg_matrix_exp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_exp_out::name, linalg_matrix_exp_out::overload_name)
      .typed<linalg_matrix_exp_out::schema>();
}

// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_exp_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_linalg_matrix_exp_out_typed_handle();
    return op.call(self, out);
}

// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_exp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_linalg_matrix_exp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_intlist_out, name, "aten::_test_optional_intlist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_intlist_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_intlist_out, schema_str, "_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_intlist_out::schema> create__test_optional_intlist_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_optional_intlist_out::name, _test_optional_intlist_out::overload_name)
      .typed<_test_optional_intlist_out::schema>();
}

// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_intlist_out::call(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {

    static auto op = create__test_optional_intlist_out_typed_handle();
    return op.call(values, addends, out);
}

// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_intlist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {

    static auto op = create__test_optional_intlist_out_typed_handle();
    return op.redispatch(dispatchKeySet, values, addends, out);
}

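// Illustrative sketch (hypothetical helper): optional int-list arguments are
// typed as at::OptionalIntArrayRef, which accepts c10::nullopt as well as
// anything an at::IntArrayRef accepts, e.g. a std::vector<int64_t>:
static inline void example_call_test_optional_intlist_out(
        const at::Tensor & values, at::Tensor & out) {
    std::vector<int64_t> addends = {1, 2};
    _test_optional_intlist_out::call(values, addends, out);
    _test_optional_intlist_out::call(values, c10::nullopt, out);
}
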
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_fullcoverage_out, name, "aten::_test_autograd_multiple_dispatch")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_fullcoverage_out, overload_name, "fullcoverage_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_fullcoverage_out, schema_str, "_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_fullcoverage_out::schema> create__test_autograd_multiple_dispatch_fullcoverage_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_fullcoverage_out::name, _test_autograd_multiple_dispatch_fullcoverage_out::overload_name)
      .typed<_test_autograd_multiple_dispatch_fullcoverage_out::schema>();
}

// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_autograd_multiple_dispatch_fullcoverage_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create__test_autograd_multiple_dispatch_fullcoverage_out_typed_handle();
    return op.call(self, out);
}

// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_autograd_multiple_dispatch_fullcoverage_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__test_autograd_multiple_dispatch_fullcoverage_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(segment_reduce_out, name, "aten::segment_reduce")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(segment_reduce_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(segment_reduce_out, schema_str, "segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)")

// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<segment_reduce_out::schema> create_segment_reduce_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(segment_reduce_out::name, segment_reduce_out::overload_name)
      .typed<segment_reduce_out::schema>();
}

// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & segment_reduce_out::call(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out) {

    static auto op = create_segment_reduce_out_typed_handle();
    return op.call(data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
}

// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & segment_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out) {

    static auto op = create_segment_reduce_out_typed_handle();
    return op.redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
}

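// Illustrative sketch (hypothetical helper; "sum" is one of the reductions
// the operator accepts): call() takes every schema argument positionally, so
// the schema defaults must be spelled out explicitly, with c10::nullopt
// standing in for the optional Tensor/Scalar arguments that are not supplied:
static inline at::Tensor & example_call_segment_reduce_out(
        const at::Tensor & data, const at::Tensor & lengths, at::Tensor & out) {
    return segment_reduce_out::call(
        data, /*reduce=*/"sum", /*lengths=*/lengths, /*indices=*/c10::nullopt,
        /*offsets=*/c10::nullopt, /*axis=*/0, /*unsafe=*/false,
        /*initial=*/c10::nullopt, out);
}
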
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_segment_reduce_backward_out, name, "aten::_segment_reduce_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_segment_reduce_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_segment_reduce_backward_out, schema_str, "_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)")

// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_segment_reduce_backward_out::schema> create__segment_reduce_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_segment_reduce_backward_out::name, _segment_reduce_backward_out::overload_name)
      .typed<_segment_reduce_backward_out::schema>();
}

// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _segment_reduce_backward_out::call(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial, at::Tensor & out) {

    static auto op = create__segment_reduce_backward_out_typed_handle();
    return op.call(grad, output, data, reduce, lengths, offsets, axis, initial, out);
}

// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _segment_reduce_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial, at::Tensor & out) {

    static auto op = create__segment_reduce_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual_copy_out, name, "aten::_make_dual_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual_copy_out, schema_str, "_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_make_dual_copy_out::schema> create__make_dual_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_dual_copy_out::name, _make_dual_copy_out::overload_name)
      .typed<_make_dual_copy_out::schema>();
}

// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_dual_copy_out::call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {

    static auto op = create__make_dual_copy_out_typed_handle();
    return op.call(primal, tangent, level, out);
}

// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_dual_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {

    static auto op = create__make_dual_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, primal, tangent, level, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex_copy_out, name, "aten::view_as_complex_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_complex_copy_out, schema_str, "view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<view_as_complex_copy_out::schema> create_view_as_complex_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_complex_copy_out::name, view_as_complex_copy_out::overload_name)
      .typed<view_as_complex_copy_out::schema>();
}

// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_as_complex_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_view_as_complex_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_as_complex_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_view_as_complex_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view_copy_out, name, "aten::_neg_view_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_neg_view_copy_out, schema_str, "_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_neg_view_copy_out::schema> create__neg_view_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_neg_view_copy_out::name, _neg_view_copy_out::overload_name)
      .typed<_neg_view_copy_out::schema>();
}

// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _neg_view_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create__neg_view_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _neg_view_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__neg_view_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_copy_out, name, "aten::expand_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand_copy_out, schema_str, "expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)")

// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<expand_copy_out::schema> create_expand_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expand_copy_out::name, expand_copy_out::overload_name)
      .typed<expand_copy_out::schema>();
}

// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & expand_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {

    static auto op = create_expand_copy_out_typed_handle();
    return op.call(self, size, implicit, out);
}

// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & expand_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {

    static auto op = create_expand_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, implicit, out);
}

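// Illustrative sketch (hypothetical helper; the target size is made up):
// SymInt[] arguments are typed as c10::SymIntArrayRef, and plain integers
// convert implicitly through c10::SymInt:
static inline void example_call_expand_copy_out(
        const at::Tensor & self, at::Tensor & out) {
    std::vector<c10::SymInt> size = {2, 3};
    expand_copy_out::call(self, size, /*implicit=*/false, out);
}
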
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_copy_out, name, "aten::unsqueeze_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsqueeze_copy_out, schema_str, "unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<unsqueeze_copy_out::schema> create_unsqueeze_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsqueeze_copy_out::name, unsqueeze_copy_out::overload_name)
      .typed<unsqueeze_copy_out::schema>();
}

// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unsqueeze_copy_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {

    static auto op = create_unsqueeze_copy_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unsqueeze_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {

    static auto op = create_unsqueeze_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices_copy_out, name, "aten::crow_indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices_copy_out, schema_str, "crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<crow_indices_copy_out::schema> create_crow_indices_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(crow_indices_copy_out::name, crow_indices_copy_out::overload_name)
      .typed<crow_indices_copy_out::schema>();
}

// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & crow_indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_crow_indices_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & crow_indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_crow_indices_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_padded_tensor_out, name, "aten::to_padded_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_padded_tensor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_padded_tensor_out, schema_str, "to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<to_padded_tensor_out::schema> create_to_padded_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_padded_tensor_out::name, to_padded_tensor_out::overload_name)
      .typed<to_padded_tensor_out::schema>();
}

// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_padded_tensor_out::call(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {

    static auto op = create_to_padded_tensor_out_typed_handle();
    return op.call(self, padding, output_size, out);
}

// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_padded_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {

    static auto op = create_to_padded_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, output_size, out);
}

}} // namespace at::_ops