#include <ATen/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h>

// @generated by torchgen/gen.py from Operators.cpp
// NOTE See [Sharded File] comment in VariableType

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
#include <ATen/ops/_cast_Int.h>
#include <ATen/ops/_cast_Long.h>
#include <ATen/ops/_backward.h>
#include <ATen/ops/data.h>
#include <ATen/ops/retain_grad.h>
#include <ATen/ops/rename.h>
#include <ATen/ops/rename.h>
#include <ATen/ops/_cudnn_rnn_backward.h>
#include <ATen/ops/native_dropout_backward.h>
#include <ATen/ops/feature_dropout.h>
#include <ATen/ops/feature_dropout.h>
#include <ATen/ops/conj.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/affine_grid_generator.h>
#include <ATen/ops/_is_any_true.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/_dim_arange.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/binary_cross_entropy.h>
#include <ATen/ops/binary_cross_entropy.h>
#include <ATen/ops/bmm.h>
#include <ATen/ops/bmm.h>
#include <ATen/ops/_sparse_broadcast_to.h>
#include <ATen/ops/concat.h>
#include <ATen/ops/concat.h>
#include <ATen/ops/concat.h>
#include <ATen/ops/concat.h>
#include <ATen/ops/chain_matmul.h>
#include <ATen/ops/chain_matmul.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/_convolution_mode.h>
#include <ATen/ops/conv1d.h>
#include <ATen/ops/conv3d.h>
#include <ATen/ops/conv1d.h>
#include <ATen/ops/conv3d.h>
#include <ATen/ops/conv_tbc_backward.h>
#include <ATen/ops/conv_transpose3d.h>
#include <ATen/ops/copy.h>
#include <ATen/ops/copy.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/cudnn_convolution.h>
#include <ATen/ops/cudnn_convolution_relu.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumulative_trapezoid.h>
#include <ATen/ops/cumulative_trapezoid.h>
#include <ATen/ops/ctc_loss.h>
#include <ATen/ops/ctc_loss.h>
#include <ATen/ops/diag_embed.h>
#include <ATen/ops/diagonal.h>
#include <ATen/ops/diagonal.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_resize_output.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/expand.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/grid_sampler_3d_backward.h>
#include <ATen/ops/hinge_embedding_loss.h>
#include <ATen/ops/native_group_norm.h>
#include <ATen/ops/_fft_r2c.h>
#include <ATen/ops/_fft_r2c.h>
#include <ATen/ops/is_neg.h>
#include <ATen/ops/isreal.h>
#include <ATen/ops/linear_backward.h>
#include <ATen/ops/mkldnn_linear_backward_input.h>
#include <ATen/ops/mkldnn_linear_backward.h>
#include <ATen/ops/_logcumsumexp.h>
#include <ATen/ops/_logcumsumexp.h>
#include <ATen/ops/value_selecting_reduction_backward.h>
#include <ATen/ops/max_pool1d.h>
#include <ATen/ops/max_pool2d.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/nanmean.h>
#include <ATen/ops/nanmean.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/mm.h>
#include <ATen/ops/mm.h>
#include <ATen/ops/mv.h>
#include <ATen/ops/mv.h>
#include <ATen/ops/narrow_copy.h>
#include <ATen/ops/narrow_copy.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts.h>
#include <ATen/ops/pairwise_distance.h>
#include <ATen/ops/_pdist_backward.h>
#include <ATen/ops/permute.h>
#include <ATen/ops/matrix_H.h>
#include <ATen/ops/pixel_shuffle.h>
#include <ATen/ops/pinverse.h>
#include <ATen/ops/reshape.h>
#include <ATen/ops/_reshape_alias.h>
#include <ATen/ops/select.h>
#include <ATen/ops/select.h>
#include <ATen/ops/celu.h>
#include <ATen/ops/celu.h>
#include <ATen/ops/silu.h>
#include <ATen/ops/silu.h>
#include <ATen/ops/silu.h>
#include <ATen/ops/mish_backward.h>
#include <ATen/ops/logit.h>
#include <ATen/ops/logit.h>
#include <ATen/ops/logit.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/slice_backward.h>
#include <ATen/ops/softmax.h>
#include <ATen/ops/softmax.h>
#include <ATen/ops/softmax.h>
#include <ATen/ops/_softmax.h>
#include <ATen/ops/_softmax.h>
#include <ATen/ops/unsafe_split.h>
#include <ATen/ops/dsplit.h>
#include <ATen/ops/dsplit.h>
#include <ATen/ops/vstack.h>
#include <ATen/ops/vstack.h>
#include <ATen/ops/stft.h>
#include <ATen/ops/stft.h>
#include <ATen/ops/_nested_sum_backward.h>
#include <ATen/ops/sum_to_size.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/t.h>
#include <ATen/ops/t.h>
#include <ATen/ops/threshold.h>
#include <ATen/ops/threshold.h>
#include <ATen/ops/threshold.h>
#include <ATen/ops/transpose.h>
#include <ATen/ops/transpose.h>
#include <ATen/ops/transpose.h>
#include <ATen/ops/flip.h>
#include <ATen/ops/roll.h>
#include <ATen/ops/_nested_from_padded.h>
#include <ATen/ops/_nested_view_from_buffer.h>
#include <ATen/ops/_trilinear.h>
#include <ATen/ops/type_as.h>
#include <ATen/ops/_has_compatible_shallow_copy_type.h>
#include <ATen/ops/_unique2.h>
#include <ATen/ops/_weight_norm_interface_backward.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/_sparse_csr_prod.h>
#include <ATen/ops/_sparse_softmax_backward_data.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax_backward_data.h>
#include <ATen/ops/_spdiags.h>
#include <ATen/ops/frexp.h>
#include <ATen/ops/frexp.h>
#include <ATen/ops/zero.h>
#include <ATen/ops/rsub.h>
#include <ATen/ops/rsub.h>
#include <ATen/ops/_sparse_mm_reduce_impl.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
#include <ATen/ops/_validate_sparse_csc_tensor_args.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims.h>
#include <ATen/ops/to_dense_backward.h>
#include <ATen/ops/_coalesce.h>
#include <ATen/ops/_values.h>
#include <ATen/ops/crow_indices.h>
#include <ATen/ops/q_zero_point.h>
#include <ATen/ops/q_per_channel_scales.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward.h>
#include <ATen/ops/fused_moving_avg_obs_fake_quant.h>
#include <ATen/ops/_choose_qparams_per_tensor.h>
#include <ATen/ops/meshgrid.h>
#include <ATen/ops/meshgrid.h>
#include <ATen/ops/can_cast.h>
#include <ATen/ops/lstm_mps_backward.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl.h>
#include <ATen/ops/_thnn_fused_gru_cell.h>
#include <ATen/ops/quantized_rnn_tanh_cell.h>
#include <ATen/ops/_pack_padded_sequence.h>
#include <ATen/ops/is_set_to.h>
#include <ATen/ops/_masked_softmax.h>
#include <ATen/ops/view.h>
#include <ATen/ops/view.h>
#include <ATen/ops/xor.h>
#include <ATen/ops/xor.h>
#include <ATen/ops/xor.h>
#include <ATen/ops/xor.h>
#include <ATen/ops/triu.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/triu.h>
#include <ATen/ops/triu.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/gather_backward.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/cross_entropy_loss.h>
#include <ATen/ops/triangular_solve.h>
#include <ATen/ops/triangular_solve.h>
#include <ATen/ops/_linalg_check_errors.h>
#include <ATen/ops/linalg_solve_triangular.h>
#include <ATen/ops/linalg_solve_triangular.h>
#include <ATen/ops/ormqr.h>
#include <ATen/ops/ormqr.h>
#include <ATen/ops/i0.h>
#include <ATen/ops/i0.h>
#include <ATen/ops/i0.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/min.h>
#include <ATen/ops/fmin.h>
#include <ATen/ops/fmin.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/equal.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_zero.h>
#include <ATen/ops/_foreach_asin.h>
#include <ATen/ops/_foreach_asin.h>
#include <ATen/ops/_foreach_cos.h>
#include <ATen/ops/_foreach_cos.h>
#include <ATen/ops/_foreach_floor.h>
#include <ATen/ops/_foreach_floor.h>
#include <ATen/ops/_foreach_tanh.h>
#include <ATen/ops/_foreach_tanh.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo.h>
#include <ATen/ops/nll_loss.h>
#include <ATen/ops/nll_loss.h>
#include <ATen/ops/nll_loss_backward.h>
#include <ATen/ops/nll_loss_backward.h>
#include <ATen/ops/smooth_l1_loss_backward.h>
#include <ATen/ops/smooth_l1_loss_backward.h>
#include <ATen/ops/huber_loss.h>
#include <ATen/ops/huber_loss.h>
#include <ATen/ops/huber_loss_backward.h>
#include <ATen/ops/huber_loss_backward.h>
#include <ATen/ops/hardsigmoid.h>
#include <ATen/ops/hardsigmoid.h>
#include <ATen/ops/hardsigmoid.h>
#include <ATen/ops/log_sigmoid.h>
#include <ATen/ops/log_sigmoid.h>
#include <ATen/ops/adaptive_avg_pool2d.h>
#include <ATen/ops/adaptive_avg_pool2d.h>
#include <ATen/ops/adaptive_avg_pool3d.h>
#include <ATen/ops/adaptive_avg_pool3d.h>
#include <ATen/ops/_adaptive_avg_pool3d.h>
#include <ATen/ops/adaptive_max_pool2d.h>
#include <ATen/ops/adaptive_max_pool2d.h>
#include <ATen/ops/adaptive_max_pool3d.h>
#include <ATen/ops/adaptive_max_pool3d.h>
#include <ATen/ops/avg_pool2d_backward.h>
#include <ATen/ops/avg_pool2d_backward.h>
#include <ATen/ops/fractional_max_pool2d.h>
#include <ATen/ops/fractional_max_pool2d.h>
#include <ATen/ops/max_unpool2d.h>
#include <ATen/ops/max_unpool2d.h>
#include <ATen/ops/max_unpool3d.h>
#include <ATen/ops/max_unpool3d.h>
#include <ATen/ops/reflection_pad3d_backward.h>
#include <ATen/ops/reflection_pad3d_backward.h>
#include <ATen/ops/replication_pad2d_backward.h>
#include <ATen/ops/replication_pad2d_backward.h>
#include <ATen/ops/replication_pad3d.h>
#include <ATen/ops/replication_pad3d.h>
#include <ATen/ops/upsample_linear1d.h>
#include <ATen/ops/upsample_bilinear2d.h>
#include <ATen/ops/upsample_bicubic2d.h>
#include <ATen/ops/upsample_nearest2d.h>
#include <ATen/ops/upsample_linear1d.h>
#include <ATen/ops/upsample_linear1d.h>
#include <ATen/ops/upsample_bilinear2d.h>
#include <ATen/ops/upsample_bilinear2d.h>
#include <ATen/ops/upsample_bicubic2d.h>
#include <ATen/ops/upsample_bicubic2d.h>
#include <ATen/ops/upsample_bicubic2d_backward.h>
#include <ATen/ops/upsample_bicubic2d_backward.h>
#include <ATen/ops/upsample_trilinear3d_backward.h>
#include <ATen/ops/upsample_trilinear3d_backward.h>
#include <ATen/ops/upsample_nearest2d.h>
#include <ATen/ops/upsample_nearest2d.h>
#include <ATen/ops/upsample_nearest3d_backward.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward.h>
#include <ATen/ops/upsample_nearest3d_backward.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward.h>
#include <ATen/ops/logit_backward.h>
#include <ATen/ops/logit_backward.h>
#include <ATen/ops/slow_conv_transpose2d.h>
#include <ATen/ops/slow_conv_transpose2d.h>
#include <ATen/ops/_slow_conv2d_backward.h>
#include <ATen/ops/_slow_conv2d_backward.h>
#include <ATen/ops/conv_depthwise3d.h>
#include <ATen/ops/slow_conv_dilated2d.h>
#include <ATen/ops/col2im.h>
#include <ATen/ops/col2im.h>
#include <ATen/ops/isfinite.h>
#include <ATen/ops/record_stream.h>
#include <ATen/ops/isposinf.h>
#include <ATen/ops/isposinf.h>
#include <ATen/ops/special_expm1.h>
#include <ATen/ops/special_expm1.h>
#include <ATen/ops/special_exp2.h>
#include <ATen/ops/special_exp2.h>
#include <ATen/ops/special_gammaln.h>
#include <ATen/ops/special_gammaln.h>
#include <ATen/ops/special_erfinv.h>
#include <ATen/ops/special_erfinv.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_i0.h>
#include <ATen/ops/special_i0.h>
#include <ATen/ops/special_polygamma.h>
#include <ATen/ops/special_polygamma.h>
#include <ATen/ops/special_log1p.h>
#include <ATen/ops/special_log1p.h>
#include <ATen/ops/fft_irfft.h>
#include <ATen/ops/fft_irfft.h>
#include <ATen/ops/fft_ifft2.h>
#include <ATen/ops/fft_ifft2.h>
#include <ATen/ops/fft_irfft2.h>
#include <ATen/ops/fft_irfft2.h>
#include <ATen/ops/fft_rfftn.h>
#include <ATen/ops/fft_rfftn.h>
#include <ATen/ops/linalg_cholesky.h>
#include <ATen/ops/linalg_cholesky.h>
#include <ATen/ops/_linalg_det.h>
#include <ATen/ops/_linalg_det.h>
#include <ATen/ops/linalg_ldl_factor.h>
#include <ATen/ops/linalg_ldl_factor.h>
#include <ATen/ops/linalg_matmul.h>
#include <ATen/ops/linalg_matmul.h>
#include <ATen/ops/linalg_slogdet.h>
#include <ATen/ops/linalg_slogdet.h>
#include <ATen/ops/logdet.h>
#include <ATen/ops/linalg_eigvals.h>
#include <ATen/ops/linalg_eigvals.h>
#include <ATen/ops/linalg_inv_ex.h>
#include <ATen/ops/linalg_inv_ex.h>
#include <ATen/ops/inner.h>
#include <ATen/ops/inner.h>
#include <ATen/ops/linalg_vector_norm.h>
#include <ATen/ops/linalg_vector_norm.h>
#include <ATen/ops/linalg_solve.h>
#include <ATen/ops/linalg_solve.h>
#include <ATen/ops/linalg_tensorinv.h>
#include <ATen/ops/linalg_tensorinv.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/_test_optional_filled_intlist.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy.h>
#include <ATen/ops/pad_sequence.h>
#include <ATen/ops/_fw_primal_copy.h>
#include <ATen/ops/view_as_real_copy.h>
#include <ATen/ops/as_strided_copy.h>
#include <ATen/ops/_reshape_alias_copy.h>
#include <ATen/ops/split_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/indices_copy.h>
#include <ATen/ops/ccol_indices_copy.h>
#include <ATen/ops/split_copy.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention.h>
#include <ATen/ops/_chunk_grad_outputs_efficient_attention.h>
#include <ATen/ops/_efficient_attention_forward.h>
#include <ATen/ops/_transformer_decoder_only_layer_fwd.h>
#include <ATen/ops/special_bessel_j1.h>
#include <ATen/ops/special_bessel_j1.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/_cudnn_rnn_backward.h>
#include <ATen/ops/native_dropout_backward.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/affine_grid_generator.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/copy.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/cudnn_convolution.h>
#include <ATen/ops/cudnn_convolution_relu.h>
#include <ATen/ops/diag_embed.h>
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_resize_output.h>
#include <ATen/ops/_resize_output.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/grid_sampler_3d_backward.h>
#include <ATen/ops/native_group_norm.h>
#include <ATen/ops/linear_backward.h>
#include <ATen/ops/mkldnn_linear_backward_input.h>
#include <ATen/ops/mkldnn_linear_backward.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts.h>
#include <ATen/ops/_pdist_backward.h>
#include <ATen/ops/pixel_shuffle.h>
#include <ATen/ops/celu.h>
#include <ATen/ops/slice_backward.h>
#include <ATen/ops/unsafe_split.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/flip.h>
#include <ATen/ops/roll.h>
#include <ATen/ops/_nested_from_padded.h>
#include <ATen/ops/_trilinear.h>
#include <ATen/ops/_unique2.h>
#include <ATen/ops/_weight_norm_interface_backward.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/_sparse_csr_prod.h>
#include <ATen/ops/_sparse_softmax_backward_data.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax_backward_data.h>
#include <ATen/ops/_spdiags.h>
#include <ATen/ops/zero.h>
#include <ATen/ops/zero.h>
#include <ATen/ops/rsub.h>
#include <ATen/ops/rsub.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims.h>
#include <ATen/ops/_coalesce.h>
#include <ATen/ops/q_per_channel_scales.h>
#include <ATen/ops/lstm_mps_backward.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl.h>
#include <ATen/ops/_thnn_fused_gru_cell.h>
#include <ATen/ops/_pack_padded_sequence.h>
#include <ATen/ops/_masked_softmax.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_zero.h>
#include <ATen/ops/_foreach_zero.h>
#include <ATen/ops/_foreach_asin.h>
#include <ATen/ops/_foreach_cos.h>
#include <ATen/ops/_foreach_floor.h>
#include <ATen/ops/_foreach_tanh.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_adaptive_avg_pool3d.h>
#include <ATen/ops/_slow_conv2d_backward.h>
#include <ATen/ops/conv_depthwise3d.h>
#include <ATen/ops/slow_conv_dilated2d.h>
#include <ATen/ops/_test_optional_filled_intlist.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy.h>
#include <ATen/ops/_fw_primal_copy.h>
#include <ATen/ops/view_as_real_copy.h>
#include <ATen/ops/as_strided_copy.h>
#include <ATen/ops/_reshape_alias_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/indices_copy.h>
#include <ATen/ops/ccol_indices_copy.h>
#include <ATen/ops/_transformer_decoder_only_layer_fwd.h>
#endif

namespace at { namespace _ops {

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Int, name, "aten::_cast_Int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Int, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Int, schema_str, "_cast_Int(Tensor self, bool non_blocking=False) -> Tensor")

// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Int::schema> create__cast_Int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cast_Int::name, _cast_Int::overload_name)
      .typed<_cast_Int::schema>();
}

// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Int::call(const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Int_typed_handle();
    return op.call(self, non_blocking);
}

// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Int_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking);
}
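
// A minimal usage sketch, not part of the torchgen output: every operator in
// this shard follows the same pattern -- a lazily-created TypedOperatorHandle,
// a `call` entry point used by the public API, and a `redispatch` entry point
// that re-enters the dispatcher below the given DispatchKeySet. Assuming a
// Tensor `x` is in scope, the wrapper above can be invoked directly:
//
//   at::Tensor x = at::ones({2, 2});
//   at::Tensor y = at::_ops::_cast_Int::call(x, /*non_blocking=*/false);
//
// Because `op` is a function-local static, the schema lookup runs once per
// process rather than once per call.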

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Long, name, "aten::_cast_Long")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Long, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Long, schema_str, "_cast_Long(Tensor self, bool non_blocking=False) -> Tensor")

// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Long::schema> create__cast_Long_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cast_Long::name, _cast_Long::overload_name)
      .typed<_cast_Long::schema>();
}

// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Long::call(const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Long_typed_handle();
    return op.call(self, non_blocking);
}

// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Long::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Long_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_backward, name, "aten::_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_backward, schema_str, "_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()")

// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_backward::schema> create__backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_backward::name, _backward::overload_name)
      .typed<_backward::schema>();
}

// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
void _backward::call(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
    static auto op = create__backward_typed_handle();
    return op.call(self, inputs, gradient, retain_graph, create_graph);
}

// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
void _backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
    static auto op = create__backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, inputs, gradient, retain_graph, create_graph);
}
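
// A hedged sketch, not generated code: `_backward` returns `()` in the schema,
// so both wrappers are `void`, and the optional schema arguments (`Tensor?`,
// `bool?`) surface as `c10::optional`. Assuming Tensors `loss` and `leaf` are
// in scope:
//
//   at::_ops::_backward::call(loss, {leaf}, c10::nullopt, c10::nullopt,
//                             /*create_graph=*/false);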

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(data, name, "aten::data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(data, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(data, schema_str, "data(Tensor self) -> Tensor")

// aten::data(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<data::schema> create_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(data::name, data::overload_name)
      .typed<data::schema>();
}

// aten::data(Tensor self) -> Tensor
at::Tensor data::call(const at::Tensor & self) {
    static auto op = create_data_typed_handle();
    return op.call(self);
}

// aten::data(Tensor self) -> Tensor
at::Tensor data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_data_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(retain_grad, name, "aten::retain_grad")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(retain_grad, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(retain_grad, schema_str, "retain_grad(Tensor(a!) self) -> ()")

// aten::retain_grad(Tensor(a!) self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<retain_grad::schema> create_retain_grad_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(retain_grad::name, retain_grad::overload_name)
      .typed<retain_grad::schema>();
}

// aten::retain_grad(Tensor(a!) self) -> ()
void retain_grad::call(at::Tensor & self) {
    static auto op = create_retain_grad_typed_handle();
    return op.call(self);
}

// aten::retain_grad(Tensor(a!) self) -> ()
void retain_grad::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_retain_grad_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rename_, name, "aten::rename_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rename_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rename_, schema_str, "rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)")

// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rename_::schema> create_rename__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rename_::name, rename_::overload_name)
      .typed<rename_::schema>();
}

// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
at::Tensor & rename_::call(at::Tensor & self, c10::optional<at::DimnameList> names) {
    static auto op = create_rename__typed_handle();
    return op.call(self, names);
}

// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
at::Tensor & rename_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<at::DimnameList> names) {
    static auto op = create_rename__typed_handle();
    return op.redispatch(dispatchKeySet, self, names);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rename, name, "aten::rename")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rename, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rename, schema_str, "rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)")

// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<rename::schema> create_rename_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rename::name, rename::overload_name)
      .typed<rename::schema>();
}

// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
at::Tensor rename::call(const at::Tensor & self, c10::optional<at::DimnameList> names) {
    static auto op = create_rename_typed_handle();
    return op.call(self, names);
}

// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
at::Tensor rename::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::DimnameList> names) {
    static auto op = create_rename_typed_handle();
    return op.redispatch(dispatchKeySet, self, names);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_backward, name, "aten::_cudnn_rnn_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_backward, schema_str, "_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])")

// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_backward::schema> create__cudnn_rnn_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn_backward::name, _cudnn_rnn_backward::overload_name)
      .typed<_cudnn_rnn_backward::schema>();
}

// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    static auto op = create__cudnn_rnn_backward_typed_handle();
    return op.call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    static auto op = create__cudnn_rnn_backward_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_dropout_backward, name, "aten::native_dropout_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_dropout_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_dropout_backward, schema_str, "native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor")

// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<native_dropout_backward::schema> create_native_dropout_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_dropout_backward::name, native_dropout_backward::overload_name)
      .typed<native_dropout_backward::schema>();
}

// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
at::Tensor native_dropout_backward::call(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
    static auto op = create_native_dropout_backward_typed_handle();
    return op.call(grad_output, mask, scale);
}

// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
at::Tensor native_dropout_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
    static auto op = create_native_dropout_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, mask, scale);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_dropout, name, "aten::feature_dropout")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_dropout, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_dropout, schema_str, "feature_dropout(Tensor input, float p, bool train) -> Tensor")

// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<feature_dropout::schema> create_feature_dropout_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(feature_dropout::name, feature_dropout::overload_name)
      .typed<feature_dropout::schema>();
}

// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor feature_dropout::call(const at::Tensor & input, double p, bool train) {
    static auto op = create_feature_dropout_typed_handle();
    return op.call(input, p, train);
}

// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor feature_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) {
    static auto op = create_feature_dropout_typed_handle();
    return op.redispatch(dispatchKeySet, input, p, train);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_dropout_, name, "aten::feature_dropout_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_dropout_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_dropout_, schema_str, "feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)")

// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<feature_dropout_::schema> create_feature_dropout__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(feature_dropout_::name, feature_dropout_::overload_name)
      .typed<feature_dropout_::schema>();
}

// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & feature_dropout_::call(at::Tensor & self, double p, bool train) {
    static auto op = create_feature_dropout__typed_handle();
    return op.call(self, p, train);
}

// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & feature_dropout_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) {
    static auto op = create_feature_dropout__typed_handle();
    return op.redispatch(dispatchKeySet, self, p, train);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj, name, "aten::conj")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conj, schema_str, "conj(Tensor(a) self) -> Tensor(a)")

// aten::conj(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<conj::schema> create_conj_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conj::name, conj::overload_name)
      .typed<conj::schema>();
}

// aten::conj(Tensor(a) self) -> Tensor(a)
at::Tensor conj::call(const at::Tensor & self) {
    static auto op = create_conj_typed_handle();
    return op.call(self);
}

// aten::conj(Tensor(a) self) -> Tensor(a)
at::Tensor conj::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_conj_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Tensor, name, "aten::_add_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Tensor, schema_str, "_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor")

// aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu_Tensor::schema> create__add_relu_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu_Tensor::name, _add_relu_Tensor::overload_name)
      .typed<_add_relu_Tensor::schema>();
}

// aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor _add_relu_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto op = create__add_relu_Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor _add_relu_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto op = create__add_relu_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu__Tensor, name, "aten::_add_relu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu__Tensor, schema_str, "_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)")

// aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu__Tensor::schema> create__add_relu__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu__Tensor::name, _add_relu__Tensor::overload_name)
      .typed<_add_relu__Tensor::schema>();
}

// aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & _add_relu__Tensor::call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto op = create__add_relu__Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & _add_relu__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto op = create__add_relu__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_out, name, "aten::_add_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_out, schema_str, "_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu_out::schema> create__add_relu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu_out::name, _add_relu_out::overload_name)
      .typed<_add_relu_out::schema>();
}

// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _add_relu_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    static auto op = create__add_relu_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _add_relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    static auto op = create__add_relu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}
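
// A hedged sketch, not generated code: for `.out` overloads the schema places
// `out` after the `*` marker, and the generated C++ signature passes it last
// and returns it by reference. Assuming Tensors `a`, `b`, and a preallocated
// `result` are in scope:
//
//   at::_ops::_add_relu_out::call(a, b, /*alpha=*/1, result);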

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Scalar, name, "aten::_add_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Scalar, schema_str, "_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor")

// aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu_Scalar::schema> create__add_relu_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu_Scalar::name, _add_relu_Scalar::overload_name)
      .typed<_add_relu_Scalar::schema>();
}

// aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor _add_relu_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto op = create__add_relu_Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor _add_relu_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto op = create__add_relu_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu__Scalar, name, "aten::_add_relu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu__Scalar, schema_str, "_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)")

// aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu__Scalar::schema> create__add_relu__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu__Scalar::name, _add_relu__Scalar::overload_name)
      .typed<_add_relu__Scalar::schema>();
}

// aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & _add_relu__Scalar::call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto op = create__add_relu__Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & _add_relu__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto op = create__add_relu__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator, name, "aten::affine_grid_generator")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator, schema_str, "affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor")

// aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<affine_grid_generator::schema> create_affine_grid_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(affine_grid_generator::name, affine_grid_generator::overload_name)
      .typed<affine_grid_generator::schema>();
}

// aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
at::Tensor affine_grid_generator::call(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
    static auto op = create_affine_grid_generator_typed_handle();
    return op.call(theta, size, align_corners);
}

// aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
at::Tensor affine_grid_generator::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
    static auto op = create_affine_grid_generator_typed_handle();
    return op.redispatch(dispatchKeySet, theta, size, align_corners);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_is_any_true, name, "aten::_is_any_true")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_is_any_true, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_is_any_true, schema_str, "_is_any_true(Tensor self) -> Tensor")

// aten::_is_any_true(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_is_any_true::schema> create__is_any_true_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_is_any_true::name, _is_any_true::overload_name)
      .typed<_is_any_true::schema>();
}

// aten::_is_any_true(Tensor self) -> Tensor
at::Tensor _is_any_true::call(const at::Tensor & self) {
    static auto op = create__is_any_true_typed_handle();
    return op.call(self);
}

// aten::_is_any_true(Tensor self) -> Tensor
at::Tensor _is_any_true::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create__is_any_true_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange, name, "aten::arange")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange, schema_str, "arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arange::schema> create_arange_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arange::name, arange::overload_name)
      .typed<arange::schema>();
}

// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange::call(const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_arange_typed_handle();
    return op.call(end, dtype, layout, device, pin_memory);
}

// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_arange_typed_handle();
    return op.redispatch(dispatchKeySet, end, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start, name, "aten::arange")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start, overload_name, "start")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start, schema_str, "arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arange_start::schema> create_arange_start_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arange_start::name, arange_start::overload_name)
      .typed<arange_start::schema>();
}

// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange_start::call(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_arange_start_typed_handle();
    return op.call(start, end, dtype, layout, device, pin_memory);
}

// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange_start::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_arange_start_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start_step, name, "aten::arange")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start_step, overload_name, "start_step")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start_step, schema_str, "arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arange_start_step::schema> create_arange_start_step_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arange_start_step::name, arange_start_step::overload_name)
      .typed<arange_start_step::schema>();
}

// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange_start_step::call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_arange_start_step_typed_handle();
    return op.call(start, end, step, dtype, layout, device, pin_memory);
}

// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange_start_step::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    static auto op = create_arange_start_step_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, step, dtype, layout, device, pin_memory);
}
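
// A hedged sketch, not generated code: factory overloads spell out the
// `ScalarType?/Layout?/Device?/pin_memory?` group that the public at::arange
// unpacks from a TensorOptions; passing `c10::nullopt` defers to the defaults:
//
//   at::Tensor r = at::_ops::arange_start_step::call(
//       0, 10, 2, at::kLong, c10::nullopt, c10::nullopt, c10::nullopt);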
1129
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_out, name, "aten::arange")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_out, schema_str, "arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)")

// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arange_out::schema> create_arange_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arange_out::name, arange_out::overload_name)
      .typed<arange_out::schema>();
}

// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arange_out::call(const at::Scalar & end, at::Tensor & out) {

    static auto op = create_arange_out_typed_handle();
    return op.call(end, out);
}

// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arange_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, at::Tensor & out) {

    static auto op = create_arange_out_typed_handle();
    return op.redispatch(dispatchKeySet, end, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start_out, name, "aten::arange")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start_out, overload_name, "start_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arange_start_out, schema_str, "arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arange_start_out::schema> create_arange_start_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arange_start_out::name, arange_start_out::overload_name)
      .typed<arange_start_out::schema>();
}

// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arange_start_out::call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {

    static auto op = create_arange_start_out_typed_handle();
    return op.call(start, end, step, out);
}

// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arange_start_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {

    static auto op = create_arange_start_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, step, out);
}

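// NOTE [out variants]
// Overloads named "*.out" mutate a caller-provided tensor (the `Tensor(a!) out`
// alias annotation) and return a reference to it. The faithful schema order
// used here places `out` last; the hand-callable at::*_out wrappers generally
// take the out tensor first, with an `_outf` spelling preserving schema order.
// A plausible usage sketch:
//   at::Tensor buf = at::empty({5}, at::kLong);
//   at::arange_out(buf, 5);  // buf <- [0, 1, 2, 3, 4]
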
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dim_arange, name, "aten::_dim_arange")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dim_arange, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dim_arange, schema_str, "_dim_arange(Tensor like, int dim) -> Tensor")

// aten::_dim_arange(Tensor like, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_dim_arange::schema> create__dim_arange_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dim_arange::name, _dim_arange::overload_name)
      .typed<_dim_arange::schema>();
}

// aten::_dim_arange(Tensor like, int dim) -> Tensor
at::Tensor _dim_arange::call(const at::Tensor & like, int64_t dim) {

    static auto op = create__dim_arange_typed_handle();
    return op.call(like, dim);
}

// aten::_dim_arange(Tensor like, int dim) -> Tensor
at::Tensor _dim_arange::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & like, int64_t dim) {

    static auto op = create__dim_arange_typed_handle();
    return op.redispatch(dispatchKeySet, like, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh, name, "aten::arcsinh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh, schema_str, "arcsinh(Tensor self) -> Tensor")

// aten::arcsinh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arcsinh::schema> create_arcsinh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arcsinh::name, arcsinh::overload_name)
      .typed<arcsinh::schema>();
}

// aten::arcsinh(Tensor self) -> Tensor
at::Tensor arcsinh::call(const at::Tensor & self) {

    static auto op = create_arcsinh_typed_handle();
    return op.call(self);
}

// aten::arcsinh(Tensor self) -> Tensor
at::Tensor arcsinh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_arcsinh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh_, name, "aten::arcsinh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh_, schema_str, "arcsinh_(Tensor(a!) self) -> Tensor(a!)")

// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arcsinh_::schema> create_arcsinh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arcsinh_::name, arcsinh_::overload_name)
      .typed<arcsinh_::schema>();
}

// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arcsinh_::call(at::Tensor & self) {

    static auto op = create_arcsinh__typed_handle();
    return op.call(self);
}

// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arcsinh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_arcsinh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh_out, name, "aten::arcsinh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsinh_out, schema_str, "arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arcsinh_out::schema> create_arcsinh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arcsinh_out::name, arcsinh_out::overload_name)
      .typed<arcsinh_out::schema>();
}

// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arcsinh_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_arcsinh_out_typed_handle();
    return op.call(self, out);
}

// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arcsinh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_arcsinh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

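// NOTE [functional / in-place / out trios]
// arcsinh, arcsinh_ and arcsinh.out above are the three standard surfaces of
// one op: functional (fresh result tensor), in-place (trailing underscore,
// mutates self) and out (writes into a provided tensor). arcsinh itself is an
// alias of aten::asinh. Caller-side sketch:
//   at::Tensor y = at::arcsinh(x);   // new tensor
//   x.arcsinh_();                    // mutates x
//   at::arcsinh_out(y, x);           // writes into y
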
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh, name, "aten::atanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh, schema_str, "atanh(Tensor self) -> Tensor")

// aten::atanh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atanh::schema> create_atanh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atanh::name, atanh::overload_name)
      .typed<atanh::schema>();
}

// aten::atanh(Tensor self) -> Tensor
at::Tensor atanh::call(const at::Tensor & self) {

    static auto op = create_atanh_typed_handle();
    return op.call(self);
}

// aten::atanh(Tensor self) -> Tensor
at::Tensor atanh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_atanh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh_, name, "aten::atanh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh_, schema_str, "atanh_(Tensor(a!) self) -> Tensor(a!)")

// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atanh_::schema> create_atanh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atanh_::name, atanh_::overload_name)
      .typed<atanh_::schema>();
}

// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & atanh_::call(at::Tensor & self) {

    static auto op = create_atanh__typed_handle();
    return op.call(self);
}

// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & atanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_atanh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh_out, name, "aten::atanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atanh_out, schema_str, "atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atanh_out::schema> create_atanh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atanh_out::name, atanh_out::overload_name)
      .typed<atanh_out::schema>();
}

// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atanh_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_atanh_out_typed_handle();
    return op.call(self, out);
}

// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_atanh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin, name, "aten::arcsin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin, schema_str, "arcsin(Tensor self) -> Tensor")

// aten::arcsin(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arcsin::schema> create_arcsin_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arcsin::name, arcsin::overload_name)
      .typed<arcsin::schema>();
}

// aten::arcsin(Tensor self) -> Tensor
at::Tensor arcsin::call(const at::Tensor & self) {

    static auto op = create_arcsin_typed_handle();
    return op.call(self);
}

// aten::arcsin(Tensor self) -> Tensor
at::Tensor arcsin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_arcsin_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin_, name, "aten::arcsin_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin_, schema_str, "arcsin_(Tensor(a!) self) -> Tensor(a!)")

// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arcsin_::schema> create_arcsin__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arcsin_::name, arcsin_::overload_name)
      .typed<arcsin_::schema>();
}

// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arcsin_::call(at::Tensor & self) {

    static auto op = create_arcsin__typed_handle();
    return op.call(self);
}

// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arcsin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_arcsin__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin_out, name, "aten::arcsin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arcsin_out, schema_str, "arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arcsin_out::schema> create_arcsin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arcsin_out::name, arcsin_out::overload_name)
      .typed<arcsin_out::schema>();
}

// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arcsin_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_arcsin_out_typed_handle();
    return op.call(self, out);
}

// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arcsin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_arcsin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window, name, "aten::bartlett_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window, schema_str, "bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bartlett_window::schema> create_bartlett_window_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bartlett_window::name, bartlett_window::overload_name)
      .typed<bartlett_window::schema>();
}

// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor bartlett_window::call(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_bartlett_window_typed_handle();
    return op.call(window_length, dtype, layout, device, pin_memory);
}

// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor bartlett_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_bartlett_window_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_periodic, name, "aten::bartlett_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_periodic, overload_name, "periodic")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_periodic, schema_str, "bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bartlett_window_periodic::schema> create_bartlett_window_periodic_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bartlett_window_periodic::name, bartlett_window_periodic::overload_name)
      .typed<bartlett_window_periodic::schema>();
}

// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor bartlett_window_periodic::call(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_bartlett_window_periodic_typed_handle();
    return op.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor bartlett_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_bartlett_window_periodic_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

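// NOTE [factory functions]
// Window factories like bartlett_window take no Tensor inputs, so the trailing
// (dtype, layout, device, pin_memory) quadruple is the decomposed form of
// TensorOptions used at the dispatcher boundary. A caller-side sketch:
//   at::Tensor w = at::bartlett_window(400, /*periodic=*/true,
//                                      at::TensorOptions().dtype(at::kFloat));
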
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy, name, "aten::binary_cross_entropy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy, schema_str, "binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor")

// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy::schema> create_binary_cross_entropy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(binary_cross_entropy::name, binary_cross_entropy::overload_name)
      .typed<binary_cross_entropy::schema>();
}

// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {

    static auto op = create_binary_cross_entropy_typed_handle();
    return op.call(self, target, weight, reduction);
}

// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {

    static auto op = create_binary_cross_entropy_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_out, name, "aten::binary_cross_entropy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_out, schema_str, "binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)")

// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_out::schema> create_binary_cross_entropy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(binary_cross_entropy_out::name, binary_cross_entropy_out::overload_name)
      .typed<binary_cross_entropy_out::schema>();
}

// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binary_cross_entropy_out::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {

    static auto op = create_binary_cross_entropy_out_typed_handle();
    return op.call(self, target, weight, reduction, out);
}

// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binary_cross_entropy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {

    static auto op = create_binary_cross_entropy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, out);
}

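// NOTE [reduction argument]
// The `int reduction=Mean` default in the binary_cross_entropy schemas refers
// to the at::Reduction enum (None = 0, Mean = 1, Sum = 2), e.g.:
//   at::Tensor loss = at::binary_cross_entropy(pred, target, /*weight=*/{},
//                                              at::Reduction::Sum);
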
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bmm, name, "aten::bmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bmm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bmm, schema_str, "bmm(Tensor self, Tensor mat2) -> Tensor")

// aten::bmm(Tensor self, Tensor mat2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bmm::schema> create_bmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bmm::name, bmm::overload_name)
      .typed<bmm::schema>();
}

// aten::bmm(Tensor self, Tensor mat2) -> Tensor
at::Tensor bmm::call(const at::Tensor & self, const at::Tensor & mat2) {

    static auto op = create_bmm_typed_handle();
    return op.call(self, mat2);
}

// aten::bmm(Tensor self, Tensor mat2) -> Tensor
at::Tensor bmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {

    static auto op = create_bmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bmm_out, name, "aten::bmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bmm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bmm_out, schema_str, "bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bmm_out::schema> create_bmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bmm_out::name, bmm_out::overload_name)
      .typed<bmm_out::schema>();
}

// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bmm_out::call(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {

    static auto op = create_bmm_out_typed_handle();
    return op.call(self, mat2, out);
}

// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {

    static auto op = create_bmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2, out);
}

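// NOTE bmm performs a batched matrix multiply: for self of shape (b, n, m)
// and mat2 of shape (b, m, p) it returns a (b, n, p) tensor; the batch
// dimension is not broadcast.
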
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_broadcast_to, name, "aten::_sparse_broadcast_to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_broadcast_to, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_broadcast_to, schema_str, "_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)")

// aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_broadcast_to::schema> create__sparse_broadcast_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_broadcast_to::name, _sparse_broadcast_to::overload_name)
      .typed<_sparse_broadcast_to::schema>();
}

// aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
at::Tensor _sparse_broadcast_to::call(const at::Tensor & self, at::IntArrayRef size) {

    static auto op = create__sparse_broadcast_to_typed_handle();
    return op.call(self, size);
}

// aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
at::Tensor _sparse_broadcast_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {

    static auto op = create__sparse_broadcast_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat, name, "aten::concat")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat, schema_str, "concat(Tensor[] tensors, int dim=0) -> Tensor")

// aten::concat(Tensor[] tensors, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<concat::schema> create_concat_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(concat::name, concat::overload_name)
      .typed<concat::schema>();
}

// aten::concat(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor concat::call(at::TensorList tensors, int64_t dim) {

    static auto op = create_concat_typed_handle();
    return op.call(tensors, dim);
}

// aten::concat(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor concat::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim) {

    static auto op = create_concat_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_out, name, "aten::concat")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_out, schema_str, "concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<concat_out::schema> create_concat_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(concat_out::name, concat_out::overload_name)
      .typed<concat_out::schema>();
}

// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concat_out::call(at::TensorList tensors, int64_t dim, at::Tensor & out) {

    static auto op = create_concat_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concat_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {

    static auto op = create_concat_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_names, name, "aten::concat")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_names, overload_name, "names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_names, schema_str, "concat.names(Tensor[] tensors, Dimname dim) -> Tensor")

// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<concat_names::schema> create_concat_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(concat_names::name, concat_names::overload_name)
      .typed<concat_names::schema>();
}

// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor concat_names::call(at::TensorList tensors, at::Dimname dim) {

    static auto op = create_concat_names_typed_handle();
    return op.call(tensors, dim);
}

// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor concat_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) {

    static auto op = create_concat_names_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_names_out, name, "aten::concat")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_names_out, overload_name, "names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concat_names_out, schema_str, "concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<concat_names_out::schema> create_concat_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(concat_names_out::name, concat_names_out::overload_name)
      .typed<concat_names_out::schema>();
}

// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concat_names_out::call(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {

    static auto op = create_concat_names_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concat_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {

    static auto op = create_concat_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

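// NOTE [unambiguous names]
// The C++ structs above are named by appending a non-empty overload name to
// the operator name: concat (""), concat_out ("out"), concat_names ("names")
// and concat_names_out ("names_out") all resolve the same base operator
// "aten::concat", which is itself an alias of aten::cat. The ".names"
// overloads take an at::Dimname so callers can address a named dimension
// instead of a positional index.
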
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chain_matmul, name, "aten::chain_matmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chain_matmul, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chain_matmul, schema_str, "chain_matmul(Tensor[] matrices) -> Tensor")

// aten::chain_matmul(Tensor[] matrices) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<chain_matmul::schema> create_chain_matmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(chain_matmul::name, chain_matmul::overload_name)
      .typed<chain_matmul::schema>();
}

// aten::chain_matmul(Tensor[] matrices) -> Tensor
at::Tensor chain_matmul::call(at::TensorList matrices) {

    static auto op = create_chain_matmul_typed_handle();
    return op.call(matrices);
}

// aten::chain_matmul(Tensor[] matrices) -> Tensor
at::Tensor chain_matmul::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices) {

    static auto op = create_chain_matmul_typed_handle();
    return op.redispatch(dispatchKeySet, matrices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chain_matmul_out, name, "aten::chain_matmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chain_matmul_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(chain_matmul_out, schema_str, "chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)")

// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<chain_matmul_out::schema> create_chain_matmul_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(chain_matmul_out::name, chain_matmul_out::overload_name)
      .typed<chain_matmul_out::schema>();
}

// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & chain_matmul_out::call(at::TensorList matrices, at::Tensor & out) {

    static auto op = create_chain_matmul_out_typed_handle();
    return op.call(matrices, out);
}

// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & chain_matmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices, at::Tensor & out) {

    static auto op = create_chain_matmul_out_typed_handle();
    return op.redispatch(dispatchKeySet, matrices, out);
}

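// NOTE chain_matmul multiplies a sequence of 2-D tensors, choosing the
// association order that minimizes the scalar multiplication count (the
// classic matrix-chain dynamic program). At the time of writing it is
// deprecated upstream in favor of torch.linalg.multi_dot, which additionally
// accepts 1-D tensors at either end of the chain.
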
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min, name, "aten::clamp_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min, schema_str, "clamp_min(Tensor self, Scalar min) -> Tensor")

// aten::clamp_min(Tensor self, Scalar min) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min::schema> create_clamp_min_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_min::name, clamp_min::overload_name)
      .typed<clamp_min::schema>();
}

// aten::clamp_min(Tensor self, Scalar min) -> Tensor
at::Tensor clamp_min::call(const at::Tensor & self, const at::Scalar & min) {

    static auto op = create_clamp_min_typed_handle();
    return op.call(self, min);
}

// aten::clamp_min(Tensor self, Scalar min) -> Tensor
at::Tensor clamp_min::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min) {

    static auto op = create_clamp_min_typed_handle();
    return op.redispatch(dispatchKeySet, self, min);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_Tensor, name, "aten::clamp_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_Tensor, schema_str, "clamp_min.Tensor(Tensor self, Tensor min) -> Tensor")

// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min_Tensor::schema> create_clamp_min_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_min_Tensor::name, clamp_min_Tensor::overload_name)
      .typed<clamp_min_Tensor::schema>();
}

// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
at::Tensor clamp_min_Tensor::call(const at::Tensor & self, const at::Tensor & min) {

    static auto op = create_clamp_min_Tensor_typed_handle();
    return op.call(self, min);
}

// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
at::Tensor clamp_min_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min) {

    static auto op = create_clamp_min_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, min);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_, name, "aten::clamp_min_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_, schema_str, "clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)")

// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min_::schema> create_clamp_min__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_min_::name, clamp_min_::overload_name)
      .typed<clamp_min_::schema>();
}

// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
at::Tensor & clamp_min_::call(at::Tensor & self, const at::Scalar & min) {

    static auto op = create_clamp_min__typed_handle();
    return op.call(self, min);
}

// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
at::Tensor & clamp_min_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min) {

    static auto op = create_clamp_min__typed_handle();
    return op.redispatch(dispatchKeySet, self, min);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min__Tensor, name, "aten::clamp_min_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min__Tensor, schema_str, "clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)")

// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min__Tensor::schema> create_clamp_min__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_min__Tensor::name, clamp_min__Tensor::overload_name)
      .typed<clamp_min__Tensor::schema>();
}

// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
at::Tensor & clamp_min__Tensor::call(at::Tensor & self, const at::Tensor & min) {

    static auto op = create_clamp_min__Tensor_typed_handle();
    return op.call(self, min);
}

// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
at::Tensor & clamp_min__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & min) {

    static auto op = create_clamp_min__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, min);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_out, name, "aten::clamp_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_out, schema_str, "clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)")

// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min_out::schema> create_clamp_min_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_min_out::name, clamp_min_out::overload_name)
      .typed<clamp_min_out::schema>();
}

// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_min_out::call(const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {

    static auto op = create_clamp_min_out_typed_handle();
    return op.call(self, min, out);
}

// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_min_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {

    static auto op = create_clamp_min_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_Tensor_out, name, "aten::clamp_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clamp_min_Tensor_out, schema_str, "clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)")

// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min_Tensor_out::schema> create_clamp_min_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_min_Tensor_out::name, clamp_min_Tensor_out::overload_name)
      .typed<clamp_min_Tensor_out::schema>();
}

// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_min_Tensor_out::call(const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {

    static auto op = create_clamp_min_Tensor_out_typed_handle();
    return op.call(self, min, out);
}

// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_min_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {

    static auto op = create_clamp_min_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, out);
}

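// NOTE clamp_min ships a Scalar flavor (the unnamed overload) and a ".Tensor"
// flavor whose `min` is a tensor broadcast against self, each with matching
// in-place and out variants. Caller-side sketch:
//   at::clamp_min(x, 0.0);               // Scalar overload
//   at::clamp_min(x, at::zeros_like(x)); // Tensor overload
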
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_mode, name, "aten::_convolution_mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_mode, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convolution_mode, schema_str, "_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor")

// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_convolution_mode::schema> create__convolution_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convolution_mode::name, _convolution_mode::overload_name)
      .typed<_convolution_mode::schema>();
}

// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor
at::Tensor _convolution_mode::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create__convolution_mode_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, groups);
}

// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor
at::Tensor _convolution_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create__convolution_mode_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv1d, name, "aten::conv1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv1d, schema_str, "conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor")

// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv1d::schema> create_conv1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv1d::name, conv1d::overload_name)
      .typed<conv1d::schema>();
}

// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor
at::Tensor conv1d::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv1d_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor
at::Tensor conv1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv1d_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv3d, name, "aten::conv3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv3d, schema_str, "conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor")

// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv3d::schema> create_conv3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv3d::name, conv3d::overload_name)
      .typed<conv3d::schema>();
}

// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor
at::Tensor conv3d::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv3d_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor
at::Tensor conv3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv3d_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv1d_padding, name, "aten::conv1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv1d_padding, overload_name, "padding")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv1d_padding, schema_str, "conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding=\"valid\", int[1] dilation=1, int groups=1) -> Tensor")

// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv1d_padding::schema> create_conv1d_padding_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv1d_padding::name, conv1d_padding::overload_name)
      .typed<conv1d_padding::schema>();
}

// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor
at::Tensor conv1d_padding::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv1d_padding_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor
at::Tensor conv1d_padding::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv1d_padding_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv3d_padding, name, "aten::conv3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv3d_padding, overload_name, "padding")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv3d_padding, schema_str, "conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding=\"valid\", int[3] dilation=1, int groups=1) -> Tensor")

// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv3d_padding::schema> create_conv3d_padding_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv3d_padding::name, conv3d_padding::overload_name)
      .typed<conv3d_padding::schema>();
}

// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor
at::Tensor conv3d_padding::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv3d_padding_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor
at::Tensor conv3d_padding::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {

    static auto op = create_conv3d_padding_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

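// NOTE [string padding]
// The ".padding" convolution overloads accept `str padding` instead of an
// explicit int list: "valid" means no padding, while "same" pads so the
// output keeps the input's spatial size and requires stride 1. Sketch:
//   at::Tensor y = at::conv1d(x, w, /*bias=*/{}, /*stride=*/1,
//                             /*padding=*/"same");
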
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc_backward, name, "aten::conv_tbc_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc_backward, schema_str, "conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)")

// aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<conv_tbc_backward::schema> create_conv_tbc_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv_tbc_backward::name, conv_tbc_backward::overload_name)
      .typed<conv_tbc_backward::schema>();
}

// aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward::call(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {

    static auto op = create_conv_tbc_backward_typed_handle();
    return op.call(self, input, weight, bias, pad);
}

// aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {

    static auto op = create_conv_tbc_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, input, weight, bias, pad);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_transpose3d_input, name, "aten::conv_transpose3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_transpose3d_input, overload_name, "input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_transpose3d_input, schema_str, "conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor")

// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv_transpose3d_input::schema> create_conv_transpose3d_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv_transpose3d_input::name, conv_transpose3d_input::overload_name)
      .typed<conv_transpose3d_input::schema>();
}

// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
at::Tensor conv_transpose3d_input::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {

    static auto op = create_conv_transpose3d_input_typed_handle();
    return op.call(input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
at::Tensor conv_transpose3d_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {

    static auto op = create_conv_transpose3d_input_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy, name, "aten::copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy, schema_str, "copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor")

// aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<copy::schema> create_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy::name, copy::overload_name)
      .typed<copy::schema>();
}

// aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
at::Tensor copy::call(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {

    static auto op = create_copy_typed_handle();
    return op.call(self, src, non_blocking);
}

// aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
at::Tensor copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking) {

    static auto op = create_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_, name, "aten::copy_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_, schema_str, "copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)")

// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copy_::schema> create_copy__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_::name, copy_::overload_name)
      .typed<copy_::schema>();
}

// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
at::Tensor & copy_::call(at::Tensor & self, const at::Tensor & src, bool non_blocking) {

    static auto op = create_copy__typed_handle();
    return op.call(self, src, non_blocking);
}

// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
at::Tensor & copy_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking) {

    static auto op = create_copy__typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking);
}

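// NOTE copy is the functional counterpart of copy_: it returns a new tensor
// carrying self's metadata and src's values, while copy_ mutates self in
// place (the Tensor(a!) annotation). With non_blocking=true, a transfer
// between a CUDA device and pinned host memory may run asynchronously with
// respect to the host.
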
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_and_resize, name, "aten::_copy_from_and_resize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_and_resize, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_and_resize, schema_str, "_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor")

// aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_copy_from_and_resize::schema> create__copy_from_and_resize_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_copy_from_and_resize::name, _copy_from_and_resize::overload_name)
      .typed<_copy_from_and_resize::schema>();
}

// aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
at::Tensor _copy_from_and_resize::call(const at::Tensor & self, const at::Tensor & dst) {

    static auto op = create__copy_from_and_resize_typed_handle();
    return op.call(self, dst);
}

// aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
at::Tensor _copy_from_and_resize::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst) {

    static auto op = create__copy_from_and_resize_typed_handle();
    return op.redispatch(dispatchKeySet, self, dst);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution, name, "aten::cudnn_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution, schema_str, "cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor")

// aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution::schema> create_cudnn_convolution_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution::name, cudnn_convolution::overload_name)
      .typed<cudnn_convolution::schema>();
}

// aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
at::Tensor cudnn_convolution::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {

    static auto op = create_cudnn_convolution_typed_handle();
    return op.call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
}

// aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
at::Tensor cudnn_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {

    static auto op = create_cudnn_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
}

2180STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_relu, name, "aten::cudnn_convolution_relu")
2181STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_relu, overload_name, "")
2182STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_relu, schema_str, "cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor")
2183
2184// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
2185static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_relu::schema> create_cudnn_convolution_relu_typed_handle() {
2186 return c10::Dispatcher::singleton()
2187 .findSchemaOrThrow(cudnn_convolution_relu::name, cudnn_convolution_relu::overload_name)
2188 .typed<cudnn_convolution_relu::schema>();
2189}
2190
2191// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
2192at::Tensor cudnn_convolution_relu::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
2193
2194 static auto op = create_cudnn_convolution_relu_typed_handle();
2195 return op.call(self, weight, bias, stride, padding, dilation, groups);
2196}
2197
2198// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
2199at::Tensor cudnn_convolution_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
2200
2201 static auto op = create_cudnn_convolution_relu_typed_handle();
2202 return op.redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups);
2203}
2204
2205STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod, name, "aten::cumprod")
2206STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod, overload_name, "")
2207STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod, schema_str, "cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor")
2208
2209// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
2210static C10_NOINLINE c10::TypedOperatorHandle<cumprod::schema> create_cumprod_typed_handle() {
2211 return c10::Dispatcher::singleton()
2212 .findSchemaOrThrow(cumprod::name, cumprod::overload_name)
2213 .typed<cumprod::schema>();
2214}
2215
2216// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
2217at::Tensor cumprod::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
2218
2219 static auto op = create_cumprod_typed_handle();
2220 return op.call(self, dim, dtype);
2221}
2222
2223// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
2224at::Tensor cumprod::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
2225
2226 static auto op = create_cumprod_typed_handle();
2227 return op.redispatch(dispatchKeySet, self, dim, dtype);
2228}
2229
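// Usage sketch (illustrative): at::cumprod forwards to cumprod::call above.
// For t = [1, 2, 3, 4] the cumulative product along dim 0 is [1, 2, 6, 24]:
//
//   at::Tensor t = at::arange(1, 5, at::kFloat);
//   at::Tensor p = at::cumprod(t, /*dim=*/0);  // dtype defaults to c10::nullopt
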
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_, name, "aten::cumprod_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_, schema_str, "cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)")

// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_::schema> create_cumprod__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumprod_::name, cumprod_::overload_name)
      .typed<cumprod_::schema>();
}

// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumprod_::call(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_cumprod__typed_handle();
    return op.call(self, dim, dtype);
}

// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumprod_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_cumprod__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_out, name, "aten::cumprod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_out, schema_str, "cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_out::schema> create_cumprod_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumprod_out::name, cumprod_out::overload_name)
      .typed<cumprod_out::schema>();
}

// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumprod_out::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto op = create_cumprod_out_typed_handle();
    return op.call(self, dim, dtype, out);
}

// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumprod_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto op = create_cumprod_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype, out);
}

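// Usage sketch (illustrative): the out-variant writes into a caller-provided
// tensor instead of allocating; in the C++ API the out tensor comes first:
//
//   at::Tensor t = at::arange(1, 5, at::kFloat);
//   at::Tensor out = at::empty({4});
//   at::cumprod_out(out, t, /*dim=*/0);  // routes through cumprod_out::call
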
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_dimname, name, "aten::cumprod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_dimname, schema_str, "cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor")

// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_dimname::schema> create_cumprod_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumprod_dimname::name, cumprod_dimname::overload_name)
      .typed<cumprod_dimname::schema>();
}

// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumprod_dimname::call(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_cumprod_dimname_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumprod_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_cumprod_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod__dimname, name, "aten::cumprod_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod__dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod__dimname, schema_str, "cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)")

// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumprod__dimname::schema> create_cumprod__dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumprod__dimname::name, cumprod__dimname::overload_name)
      .typed<cumprod__dimname::schema>();
}

// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumprod__dimname::call(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_cumprod__dimname_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumprod__dimname::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_cumprod__dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_dimname_out, name, "aten::cumprod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumprod_dimname_out, schema_str, "cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_dimname_out::schema> create_cumprod_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumprod_dimname_out::name, cumprod_dimname_out::overload_name)
      .typed<cumprod_dimname_out::schema>();
}

// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumprod_dimname_out::call(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto op = create_cumprod_dimname_out_typed_handle();
    return op.call(self, dim, dtype, out);
}

// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumprod_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto op = create_cumprod_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype, out);
}

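// Note (illustrative): the cumprod entries above all share the base name
// "aten::cumprod" / "aten::cumprod_" and are distinguished only by
// overload_name ("", "out", "dimname", "dimname_out"). The same
// (name, overload) pair identifies an operator for direct lookup:
//
//   auto handle = c10::Dispatcher::singleton()
//       .findSchemaOrThrow("aten::cumprod", "dimname_out");
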
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumulative_trapezoid_x, name, "aten::cumulative_trapezoid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumulative_trapezoid_x, overload_name, "x")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumulative_trapezoid_x, schema_str, "cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor")

// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumulative_trapezoid_x::schema> create_cumulative_trapezoid_x_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumulative_trapezoid_x::name, cumulative_trapezoid_x::overload_name)
      .typed<cumulative_trapezoid_x::schema>();
}

// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
at::Tensor cumulative_trapezoid_x::call(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    static auto op = create_cumulative_trapezoid_x_typed_handle();
    return op.call(y, x, dim);
}

// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
at::Tensor cumulative_trapezoid_x::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    static auto op = create_cumulative_trapezoid_x_typed_handle();
    return op.redispatch(dispatchKeySet, y, x, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumulative_trapezoid_dx, name, "aten::cumulative_trapezoid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumulative_trapezoid_dx, overload_name, "dx")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cumulative_trapezoid_dx, schema_str, "cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor")

// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumulative_trapezoid_dx::schema> create_cumulative_trapezoid_dx_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cumulative_trapezoid_dx::name, cumulative_trapezoid_dx::overload_name)
      .typed<cumulative_trapezoid_dx::schema>();
}

// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
at::Tensor cumulative_trapezoid_dx::call(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    static auto op = create_cumulative_trapezoid_dx_typed_handle();
    return op.call(y, dx, dim);
}

// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
at::Tensor cumulative_trapezoid_dx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    static auto op = create_cumulative_trapezoid_dx_typed_handle();
    return op.redispatch(dispatchKeySet, y, dx, dim);
}

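// Usage sketch (illustrative): both overloads apply the cumulative trapezoidal
// rule, so the result has one fewer element than y along dim. For
// y = [1, 2, 3] with the default dx=1 this gives [1.5, 4.0]:
//
//   at::Tensor y = at::arange(1, 4, at::kFloat);
//   at::Tensor c = at::cumulative_trapezoid(y);  // dx overload, dx defaults to 1
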
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ctc_loss_IntList, name, "aten::ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ctc_loss_IntList, overload_name, "IntList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ctc_loss_IntList, schema_str, "ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor")

// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ctc_loss_IntList::schema> create_ctc_loss_IntList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ctc_loss_IntList::name, ctc_loss_IntList::overload_name)
      .typed<ctc_loss_IntList::schema>();
}

// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
at::Tensor ctc_loss_IntList::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    static auto op = create_ctc_loss_IntList_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}

// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
at::Tensor ctc_loss_IntList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    static auto op = create_ctc_loss_IntList_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ctc_loss_Tensor, name, "aten::ctc_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ctc_loss_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ctc_loss_Tensor, schema_str, "ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor")

// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ctc_loss_Tensor::schema> create_ctc_loss_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ctc_loss_Tensor::name, ctc_loss_Tensor::overload_name)
      .typed<ctc_loss_Tensor::schema>();
}

// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
at::Tensor ctc_loss_Tensor::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    static auto op = create_ctc_loss_Tensor_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}

// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
at::Tensor ctc_loss_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    static auto op = create_ctc_loss_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_embed, name, "aten::diag_embed")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_embed, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_embed, schema_str, "diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor")

// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diag_embed::schema> create_diag_embed_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diag_embed::name, diag_embed::overload_name)
      .typed<diag_embed::schema>();
}

// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
at::Tensor diag_embed::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    static auto op = create_diag_embed_typed_handle();
    return op.call(self, offset, dim1, dim2);
}

// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
at::Tensor diag_embed::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    static auto op = create_diag_embed_typed_handle();
    return op.redispatch(dispatchKeySet, self, offset, dim1, dim2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal, name, "aten::diagonal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal, schema_str, "diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)")

// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal::schema> create_diagonal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal::name, diagonal::overload_name)
      .typed<diagonal::schema>();
}

// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
at::Tensor diagonal::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    static auto op = create_diagonal_typed_handle();
    return op.call(self, offset, dim1, dim2);
}

// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
at::Tensor diagonal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    static auto op = create_diagonal_typed_handle();
    return op.redispatch(dispatchKeySet, self, offset, dim1, dim2);
}

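// Usage sketch (illustrative): the Tensor(a) annotation in the schema marks
// diagonal as a view op, so the result aliases self's storage:
//
//   at::Tensor m = at::randn({3, 3});
//   at::Tensor d = m.diagonal();  // view of m's main diagonal
//   d.zero_();                    // also zeros the diagonal of m
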
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_Dimname, name, "aten::diagonal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_Dimname, overload_name, "Dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diagonal_Dimname, schema_str, "diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)")

// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_Dimname::schema> create_diagonal_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_Dimname::name, diagonal_Dimname::overload_name)
      .typed<diagonal_Dimname::schema>();
}

// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
at::Tensor diagonal_Dimname::call(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
    static auto op = create_diagonal_Dimname_typed_handle();
    return op.call(self, outdim, dim1, dim2, offset);
}

// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
at::Tensor diagonal_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
    static auto op = create_diagonal_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, outdim, dim1, dim2, offset);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Tensor, name, "aten::divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Tensor, schema_str, "divide.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<divide_Tensor::schema> create_divide_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide_Tensor::name, divide_Tensor::overload_name)
      .typed<divide_Tensor::schema>();
}

// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor divide_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_divide_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor divide_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_divide_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

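// Usage sketch (illustrative): divide is the NumPy-style alias of aten::div
// and performs true division even for integer inputs:
//
//   at::Tensor a = at::full({2}, 7);
//   at::Tensor b = at::full({2}, 2);
//   at::Tensor q = at::divide(a, b);  // {3.5, 3.5}, floating result
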
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Tensor, name, "aten::divide_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Tensor, schema_str, "divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide__Tensor::schema> create_divide__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide__Tensor::name, divide__Tensor::overload_name)
      .typed<divide__Tensor::schema>();
}

// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & divide__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto op = create_divide__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & divide__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto op = create_divide__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_out, name, "aten::divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_out, schema_str, "divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide_out::schema> create_divide_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide_out::name, divide_out::overload_name)
      .typed<divide_out::schema>();
}

// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & divide_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_divide_out_typed_handle();
    return op.call(self, other, out);
}

// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & divide_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto op = create_divide_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Scalar, name, "aten::divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Scalar, schema_str, "divide.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<divide_Scalar::schema> create_divide_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide_Scalar::name, divide_Scalar::overload_name)
      .typed<divide_Scalar::schema>();
}

// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor divide_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto op = create_divide_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor divide_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto op = create_divide_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Scalar, name, "aten::divide_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Scalar, schema_str, "divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide__Scalar::schema> create_divide__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide__Scalar::name, divide__Scalar::overload_name)
      .typed<divide__Scalar::schema>();
}

// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & divide__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto op = create_divide__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & divide__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto op = create_divide__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Tensor_mode, name, "aten::divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Tensor_mode, overload_name, "Tensor_mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Tensor_mode, schema_str, "divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor")

// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<divide_Tensor_mode::schema> create_divide_Tensor_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide_Tensor_mode::name, divide_Tensor_mode::overload_name)
      .typed<divide_Tensor_mode::schema>();
}

// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
at::Tensor divide_Tensor_mode::call(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    static auto op = create_divide_Tensor_mode_typed_handle();
    return op.call(self, other, rounding_mode);
}

// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
at::Tensor divide_Tensor_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    static auto op = create_divide_Tensor_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode);
}

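// Usage sketch (illustrative): rounding_mode selects between true division
// (c10::nullopt), truncation toward zero ("trunc"), and floor division
// ("floor"). For -7 / 2, trunc gives -3 while floor gives -4:
//
//   at::Tensor a = at::full({1}, -7);
//   at::Tensor b = at::full({1}, 2);
//   at::divide(a, b, "trunc");  // {-3}
//   at::divide(a, b, "floor");  // {-4}
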
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Tensor_mode, name, "aten::divide_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Tensor_mode, overload_name, "Tensor_mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Tensor_mode, schema_str, "divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)")

// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide__Tensor_mode::schema> create_divide__Tensor_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide__Tensor_mode::name, divide__Tensor_mode::overload_name)
      .typed<divide__Tensor_mode::schema>();
}

// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & divide__Tensor_mode::call(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    static auto op = create_divide__Tensor_mode_typed_handle();
    return op.call(self, other, rounding_mode);
}

// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & divide__Tensor_mode::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    static auto op = create_divide__Tensor_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_out_mode, name, "aten::divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_out_mode, overload_name, "out_mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_out_mode, schema_str, "divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)")

// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide_out_mode::schema> create_divide_out_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide_out_mode::name, divide_out_mode::overload_name)
      .typed<divide_out_mode::schema>();
}

// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & divide_out_mode::call(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    static auto op = create_divide_out_mode_typed_handle();
    return op.call(self, other, rounding_mode, out);
}

// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & divide_out_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    static auto op = create_divide_out_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Scalar_mode, name, "aten::divide")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Scalar_mode, overload_name, "Scalar_mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide_Scalar_mode, schema_str, "divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor")

// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<divide_Scalar_mode::schema> create_divide_Scalar_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide_Scalar_mode::name, divide_Scalar_mode::overload_name)
      .typed<divide_Scalar_mode::schema>();
}

// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
at::Tensor divide_Scalar_mode::call(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    static auto op = create_divide_Scalar_mode_typed_handle();
    return op.call(self, other, rounding_mode);
}

// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
at::Tensor divide_Scalar_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    static auto op = create_divide_Scalar_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Scalar_mode, name, "aten::divide_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Scalar_mode, overload_name, "Scalar_mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(divide__Scalar_mode, schema_str, "divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)")

// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide__Scalar_mode::schema> create_divide__Scalar_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide__Scalar_mode::name, divide__Scalar_mode::overload_name)
      .typed<divide__Scalar_mode::schema>();
}

// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & divide__Scalar_mode::call(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    static auto op = create_divide__Scalar_mode_typed_handle();
    return op.call(self, other, rounding_mode);
}

// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & divide__Scalar_mode::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    static auto op = create_divide__Scalar_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_empty_affine_quantized, name, "aten::_empty_affine_quantized")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_empty_affine_quantized, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_empty_affine_quantized, schema_str, "_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor")

// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_empty_affine_quantized::schema> create__empty_affine_quantized_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_empty_affine_quantized::name, _empty_affine_quantized::overload_name)
      .typed<_empty_affine_quantized::schema>();
}

// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
at::Tensor _empty_affine_quantized::call(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
    static auto op = create__empty_affine_quantized_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
}

// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
at::Tensor _empty_affine_quantized::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
    static auto op = create__empty_affine_quantized_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output_, name, "aten::_resize_output_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output_, schema_str, "_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)")

// aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_resize_output_::schema> create__resize_output__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_resize_output_::name, _resize_output_::overload_name)
      .typed<_resize_output_::schema>();
}

// aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)
const at::Tensor & _resize_output_::call(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
    static auto op = create__resize_output__typed_handle();
    return op.call(self, size, device);
}

// aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)
const at::Tensor & _resize_output_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device) {
    static auto op = create__resize_output__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, device);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(empty_like, name, "aten::empty_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(empty_like, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(empty_like, schema_str, "empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")

// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<empty_like::schema> create_empty_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_like::name, empty_like::overload_name)
      .typed<empty_like::schema>();
}

// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_like::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
    static auto op = create_empty_like_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
    static auto op = create_empty_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

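// Usage sketch (illustrative): empty_like allocates with self's metadata but
// leaves the data uninitialized, so reading it before writing is undefined:
//
//   at::Tensor t = at::randn({2, 3});
//   at::Tensor buf = at::empty_like(t);  // same shape/dtype/device, garbage values
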
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand, name, "aten::expand")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(expand, schema_str, "expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)")

// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<expand::schema> create_expand_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expand::name, expand::overload_name)
      .typed<expand::schema>();
}

// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
at::Tensor expand::call(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
    static auto op = create_expand_typed_handle();
    return op.call(self, size, implicit);
}

// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
at::Tensor expand::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
    static auto op = create_expand_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, implicit);
}

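// Usage sketch (illustrative): expand is a zero-copy broadcast (hence the
// Tensor(a) alias annotation and the SymInt[] size, which admits symbolic
// shapes under tracing); a size of -1 keeps the existing dimension:
//
//   at::Tensor col = at::randn({3, 1});
//   at::Tensor e = col.expand({-1, 4});  // 3x4 view, stride 0 along dim 1
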
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_using_ints, name, "aten::flatten")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_using_ints, overload_name, "using_ints")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_using_ints, schema_str, "flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)")

// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<flatten_using_ints::schema> create_flatten_using_ints_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_using_ints::name, flatten_using_ints::overload_name)
      .typed<flatten_using_ints::schema>();
}

// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
at::Tensor flatten_using_ints::call(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
    static auto op = create_flatten_using_ints_typed_handle();
    return op.call(self, start_dim, end_dim);
}

// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
at::Tensor flatten_using_ints::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
    static auto op = create_flatten_using_ints_typed_handle();
    return op.redispatch(dispatchKeySet, self, start_dim, end_dim);
}

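// Usage sketch (illustrative): with the defaults start_dim=0, end_dim=-1 the
// whole tensor collapses to 1-D:
//
//   at::Tensor t = at::randn({2, 3, 4});
//   at::flatten(t);     // shape {24}
//   at::flatten(t, 1);  // shape {2, 12}
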
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_named_out_dim, name, "aten::flatten")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_named_out_dim, overload_name, "named_out_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_named_out_dim, schema_str, "flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)")

// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<flatten_named_out_dim::schema> create_flatten_named_out_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_named_out_dim::name, flatten_named_out_dim::overload_name)
      .typed<flatten_named_out_dim::schema>();
}

// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_named_out_dim::call(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
    static auto op = create_flatten_named_out_dim_typed_handle();
    return op.call(self, start_dim, end_dim, out_dim);
}

// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_named_out_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
    static auto op = create_flatten_named_out_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, start_dim, end_dim, out_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_using_names, name, "aten::flatten")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_using_names, overload_name, "using_names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_using_names, schema_str, "flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)")

// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<flatten_using_names::schema> create_flatten_using_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_using_names::name, flatten_using_names::overload_name)
      .typed<flatten_using_names::schema>();
}

// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_using_names::call(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
    static auto op = create_flatten_using_names_typed_handle();
    return op.call(self, start_dim, end_dim, out_dim);
}

// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_using_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
    static auto op = create_flatten_using_names_typed_handle();
    return op.redispatch(dispatchKeySet, self, start_dim, end_dim, out_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_DimnameList, name, "aten::flatten")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_DimnameList, overload_name, "DimnameList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_DimnameList, schema_str, "flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)")

// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<flatten_DimnameList::schema> create_flatten_DimnameList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_DimnameList::name, flatten_DimnameList::overload_name)
      .typed<flatten_DimnameList::schema>();
}

// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_DimnameList::call(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
    static auto op = create_flatten_DimnameList_typed_handle();
    return op.call(self, dims, out_dim);
}

// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_DimnameList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
    static auto op = create_flatten_DimnameList_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims, out_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor, name, "aten::floor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor, schema_str, "floor(Tensor self) -> Tensor")

// aten::floor(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<floor::schema> create_floor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor::name, floor::overload_name)
      .typed<floor::schema>();
}

// aten::floor(Tensor self) -> Tensor
at::Tensor floor::call(const at::Tensor & self) {
    static auto op = create_floor_typed_handle();
    return op.call(self);
}

// aten::floor(Tensor self) -> Tensor
at::Tensor floor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_floor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

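// Usage sketch (illustrative): floor rounds toward negative infinity; the
// in-place and out variants follow below as separate schema entries:
//
//   at::Tensor t = at::full({1}, -1.5);
//   at::floor(t);  // {-2.}
//   t.floor_();    // in-place, routed through aten::floor_
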
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_, name, "aten::floor_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_, schema_str, "floor_(Tensor(a!) self) -> Tensor(a!)")

// aten::floor_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_::schema> create_floor__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_::name, floor_::overload_name)
      .typed<floor_::schema>();
}

// aten::floor_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & floor_::call(at::Tensor & self) {
    static auto op = create_floor__typed_handle();
    return op.call(self);
}

// aten::floor_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & floor_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_floor__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_out, name, "aten::floor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(floor_out, schema_str, "floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_out::schema> create_floor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_out::name, floor_out::overload_name)
      .typed<floor_out::schema>();
}

// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_floor_out_typed_handle();
    return op.call(self, out);
}

// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_floor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_backward, name, "aten::grid_sampler_3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_backward, schema_str, "grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)")

// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_3d_backward::schema> create_grid_sampler_3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_3d_backward::name, grid_sampler_3d_backward::overload_name)
      .typed<grid_sampler_3d_backward::schema>();
}

// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
    static auto op = create_grid_sampler_3d_backward_typed_handle();
    return op.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}

// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
    static auto op = create_grid_sampler_3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hinge_embedding_loss, name, "aten::hinge_embedding_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hinge_embedding_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hinge_embedding_loss, schema_str, "hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor")

// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hinge_embedding_loss::schema> create_hinge_embedding_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hinge_embedding_loss::name, hinge_embedding_loss::overload_name)
      .typed<hinge_embedding_loss::schema>();
}

// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
at::Tensor hinge_embedding_loss::call(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
    static auto op = create_hinge_embedding_loss_typed_handle();
    return op.call(self, target, margin, reduction);
}

// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
at::Tensor hinge_embedding_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
    static auto op = create_hinge_embedding_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, margin, reduction);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm, name, "aten::native_group_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm, schema_str, "native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)")

// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_group_norm::schema> create_native_group_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_group_norm::name, native_group_norm::overload_name)
      .typed<native_group_norm::schema>();
}

// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
    static auto op = create_native_group_norm_typed_handle();
    return op.call(input, weight, bias, N, C, HxW, group, eps);
}

// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
    static auto op = create_native_group_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps);
}

3130STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_r2c, name, "aten::_fft_r2c")
3131STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_r2c, overload_name, "")
3132STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_r2c, schema_str, "_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor")
3133
3134// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
3135static C10_NOINLINE c10::TypedOperatorHandle<_fft_r2c::schema> create__fft_r2c_typed_handle() {
3136 return c10::Dispatcher::singleton()
3137 .findSchemaOrThrow(_fft_r2c::name, _fft_r2c::overload_name)
3138 .typed<_fft_r2c::schema>();
3139}
3140
3141// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
3142at::Tensor _fft_r2c::call(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
3143
3144 static auto op = create__fft_r2c_typed_handle();
3145 return op.call(self, dim, normalization, onesided);
3146}
3147
3148// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
3149at::Tensor _fft_r2c::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
3150
3151 static auto op = create__fft_r2c_typed_handle();
3152 return op.redispatch(dispatchKeySet, self, dim, normalization, onesided);
3153}
3154
3155STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_r2c_out, name, "aten::_fft_r2c")
3156STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_r2c_out, overload_name, "out")
3157STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_r2c_out, schema_str, "_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)")
3158
3159// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
3160static C10_NOINLINE c10::TypedOperatorHandle<_fft_r2c_out::schema> create__fft_r2c_out_typed_handle() {
3161 return c10::Dispatcher::singleton()
3162 .findSchemaOrThrow(_fft_r2c_out::name, _fft_r2c_out::overload_name)
3163 .typed<_fft_r2c_out::schema>();
3164}
3165
3166// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
3167at::Tensor & _fft_r2c_out::call(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
3168
3169 static auto op = create__fft_r2c_out_typed_handle();
3170 return op.call(self, dim, normalization, onesided, out);
3171}
3172
3173// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
3174at::Tensor & _fft_r2c_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
3175
3176 static auto op = create__fft_r2c_out_typed_handle();
3177 return op.redispatch(dispatchKeySet, self, dim, normalization, onesided, out);
3178}
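
// NOTE: "out" overloads such as _fft_r2c.out follow the usual ATen
// convention: the destination tensor is appended as the final argument,
// the schema marks it with the aliasing annotation (a!) because it is
// written in place, and both call() and redispatch() return a reference
// to that same `out` tensor instead of allocating a new result.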

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_neg, name, "aten::is_neg")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_neg, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_neg, schema_str, "is_neg(Tensor self) -> bool")

// aten::is_neg(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_neg::schema> create_is_neg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_neg::name, is_neg::overload_name)
      .typed<is_neg::schema>();
}

// aten::is_neg(Tensor self) -> bool
bool is_neg::call(const at::Tensor & self) {

    static auto op = create_is_neg_typed_handle();
    return op.call(self);
}

// aten::is_neg(Tensor self) -> bool
bool is_neg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_is_neg_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isreal, name, "aten::isreal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isreal, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isreal, schema_str, "isreal(Tensor self) -> Tensor")

// aten::isreal(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isreal::schema> create_isreal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isreal::name, isreal::overload_name)
      .typed<isreal::schema>();
}

// aten::isreal(Tensor self) -> Tensor
at::Tensor isreal::call(const at::Tensor & self) {

    static auto op = create_isreal_typed_handle();
    return op.call(self);
}

// aten::isreal(Tensor self) -> Tensor
at::Tensor isreal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_isreal_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_backward, name, "aten::linear_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_backward, schema_str, "linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)")

// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<linear_backward::schema> create_linear_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linear_backward::name, linear_backward::overload_name)
      .typed<linear_backward::schema>();
}

// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {

    static auto op = create_linear_backward_typed_handle();
    return op.call(self, grad_output, weight, output_mask);
}

// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {

    static auto op = create_linear_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_input, name, "aten::mkldnn_linear_backward_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_input, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_input, schema_str, "mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor")

// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_input::schema> create_mkldnn_linear_backward_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward_input::name, mkldnn_linear_backward_input::overload_name)
      .typed<mkldnn_linear_backward_input::schema>();
}

// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
at::Tensor mkldnn_linear_backward_input::call(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {

    static auto op = create_mkldnn_linear_backward_input_typed_handle();
    return op.call(input_size, grad_output, weight);
}

// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
at::Tensor mkldnn_linear_backward_input::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {

    static auto op = create_mkldnn_linear_backward_input_typed_handle();
    return op.redispatch(dispatchKeySet, input_size, grad_output, weight);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward, name, "aten::mkldnn_linear_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward, schema_str, "mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)")

// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward::schema> create_mkldnn_linear_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward::name, mkldnn_linear_backward::overload_name)
      .typed<mkldnn_linear_backward::schema>();
}

// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {

    static auto op = create_mkldnn_linear_backward_typed_handle();
    return op.call(self, grad_output, weight, output_mask);
}

// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {

    static auto op = create_mkldnn_linear_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_logcumsumexp, name, "aten::_logcumsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_logcumsumexp, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_logcumsumexp, schema_str, "_logcumsumexp(Tensor self, int dim) -> Tensor")

// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_logcumsumexp::schema> create__logcumsumexp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_logcumsumexp::name, _logcumsumexp::overload_name)
      .typed<_logcumsumexp::schema>();
}

// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
at::Tensor _logcumsumexp::call(const at::Tensor & self, int64_t dim) {

    static auto op = create__logcumsumexp_typed_handle();
    return op.call(self, dim);
}

// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
at::Tensor _logcumsumexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create__logcumsumexp_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_logcumsumexp_out, name, "aten::_logcumsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_logcumsumexp_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_logcumsumexp_out, schema_str, "_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_logcumsumexp_out::schema> create__logcumsumexp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_logcumsumexp_out::name, _logcumsumexp_out::overload_name)
      .typed<_logcumsumexp_out::schema>();
}

// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _logcumsumexp_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {

    static auto op = create__logcumsumexp_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _logcumsumexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {

    static auto op = create__logcumsumexp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(value_selecting_reduction_backward, name, "aten::value_selecting_reduction_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(value_selecting_reduction_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(value_selecting_reduction_backward, schema_str, "value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor")

// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<value_selecting_reduction_backward::schema> create_value_selecting_reduction_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(value_selecting_reduction_backward::name, value_selecting_reduction_backward::overload_name)
      .typed<value_selecting_reduction_backward::schema>();
}

// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
at::Tensor value_selecting_reduction_backward::call(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {

    static auto op = create_value_selecting_reduction_backward_typed_handle();
    return op.call(grad, dim, indices, sizes, keepdim);
}

// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
at::Tensor value_selecting_reduction_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {

    static auto op = create_value_selecting_reduction_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, dim, indices, sizes, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool1d, name, "aten::max_pool1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool1d, schema_str, "max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max_pool1d::schema> create_max_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool1d::name, max_pool1d::overload_name)
      .typed<max_pool1d::schema>();
}

// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool1d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool1d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d, name, "aten::max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_pool2d, schema_str, "max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d::schema> create_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d::name, max_pool2d::overload_name)
      .typed<max_pool2d::schema>();
}

// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {

    static auto op = create_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean, name, "aten::mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean, schema_str, "mean(Tensor self, *, ScalarType? dtype=None) -> Tensor")

// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mean::schema> create_mean_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean::name, mean::overload_name)
      .typed<mean::schema>();
}

// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {

    static auto op = create_mean_typed_handle();
    return op.call(self, dtype);
}

// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype) {

    static auto op = create_mean_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_dim, name, "aten::mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_dim, schema_str, "mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")

// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mean_dim::schema> create_mean_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_dim::name, mean_dim::overload_name)
      .typed<mean_dim::schema>();
}

// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_mean_dim_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_mean_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_out, name, "aten::mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_out, schema_str, "mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mean_out::schema> create_mean_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_out::name, mean_out::overload_name)
      .typed<mean_out::schema>();
}

// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_mean_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_mean_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_names_dim, name, "aten::mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_names_dim, overload_name, "names_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_names_dim, schema_str, "mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")

// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mean_names_dim::schema> create_mean_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_names_dim::name, mean_names_dim::overload_name)
      .typed<mean_names_dim::schema>();
}

// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_mean_names_dim_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_mean_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_names_out, name, "aten::mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_names_out, overload_name, "names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mean_names_out, schema_str, "mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mean_names_out::schema> create_mean_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_names_out::name, mean_names_out::overload_name)
      .typed<mean_names_out::schema>();
}

// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_names_out::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_mean_names_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_mean_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmean, name, "aten::nanmean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmean, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmean, schema_str, "nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")

// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nanmean::schema> create_nanmean_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmean::name, nanmean::overload_name)
      .typed<nanmean::schema>();
}

// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor nanmean::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_nanmean_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor nanmean::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_nanmean_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmean_out, name, "aten::nanmean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmean_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nanmean_out, schema_str, "nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nanmean_out::schema> create_nanmean_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmean_out::name, nanmean_out::overload_name)
      .typed<nanmean_out::schema>();
}

// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanmean_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_nanmean_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanmean_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_nanmean_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_dim, name, "aten::min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_dim, schema_str, "min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<min_dim::schema> create_min_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_dim::name, min_dim::overload_name)
      .typed<min_dim::schema>();
}

// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> min_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_min_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> min_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {

    static auto op = create_min_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_dim_min, name, "aten::min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_dim_min, overload_name, "dim_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_dim_min, schema_str, "min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<min_dim_min::schema> create_min_dim_min_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_dim_min::name, min_dim_min::overload_name)
      .typed<min_dim_min::schema>();
}

// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> min_dim_min::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {

    static auto op = create_min_dim_min_typed_handle();
    return op.call(self, dim, keepdim, min, min_indices);
}

// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> min_dim_min::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {

    static auto op = create_min_dim_min_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_names_dim, name, "aten::min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_names_dim, overload_name, "names_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_names_dim, schema_str, "min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<min_names_dim::schema> create_min_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_names_dim::name, min_names_dim::overload_name)
      .typed<min_names_dim::schema>();
}

// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> min_names_dim::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_min_names_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> min_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {

    static auto op = create_min_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_names_dim_min, name, "aten::min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_names_dim_min, overload_name, "names_dim_min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_names_dim_min, schema_str, "min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<min_names_dim_min::schema> create_min_names_dim_min_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_names_dim_min::name, min_names_dim_min::overload_name)
      .typed<min_names_dim_min::schema>();
}

// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> min_names_dim_min::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {

    static auto op = create_min_names_dim_min_typed_handle();
    return op.call(self, dim, keepdim, min, min_indices);
}

// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> min_names_dim_min::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {

    static auto op = create_min_names_dim_min_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mm, name, "aten::mm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mm, schema_str, "mm(Tensor self, Tensor mat2) -> Tensor")

// aten::mm(Tensor self, Tensor mat2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mm::schema> create_mm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mm::name, mm::overload_name)
      .typed<mm::schema>();
}

// aten::mm(Tensor self, Tensor mat2) -> Tensor
at::Tensor mm::call(const at::Tensor & self, const at::Tensor & mat2) {

    static auto op = create_mm_typed_handle();
    return op.call(self, mat2);
}

// aten::mm(Tensor self, Tensor mat2) -> Tensor
at::Tensor mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {

    static auto op = create_mm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mm_out, name, "aten::mm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mm_out, schema_str, "mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mm_out::schema> create_mm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mm_out::name, mm_out::overload_name)
      .typed<mm_out::schema>();
}

// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mm_out::call(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {

    static auto op = create_mm_out_typed_handle();
    return op.call(self, mat2, out);
}

// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {

    static auto op = create_mm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2, out);
}
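
// As a sketch of how these entry points are reached from user code
// (assuming the standard generated headers, e.g. ATen/ops/mm.h): the
// public wrapper at::mm(self, mat2) forwards to mm::call(self, mat2)
// above, so
//
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({3, 4});
//   at::Tensor c = at::mm(a, b);  // resolves "aten::mm" via the dispatcher
//
// ends up in mm::call, which invokes whichever kernel is registered for
// the operands' dispatch keys (CPU, CUDA, Autograd, ...).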

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mv, name, "aten::mv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mv, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mv, schema_str, "mv(Tensor self, Tensor vec) -> Tensor")

// aten::mv(Tensor self, Tensor vec) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mv::schema> create_mv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mv::name, mv::overload_name)
      .typed<mv::schema>();
}

// aten::mv(Tensor self, Tensor vec) -> Tensor
at::Tensor mv::call(const at::Tensor & self, const at::Tensor & vec) {

    static auto op = create_mv_typed_handle();
    return op.call(self, vec);
}

// aten::mv(Tensor self, Tensor vec) -> Tensor
at::Tensor mv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec) {

    static auto op = create_mv_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mv_out, name, "aten::mv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mv_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mv_out, schema_str, "mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mv_out::schema> create_mv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mv_out::name, mv_out::overload_name)
      .typed<mv_out::schema>();
}

// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mv_out::call(const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {

    static auto op = create_mv_out_typed_handle();
    return op.call(self, vec, out);
}

// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {

    static auto op = create_mv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_copy, name, "aten::narrow_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_copy, schema_str, "narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor")

// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<narrow_copy::schema> create_narrow_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(narrow_copy::name, narrow_copy::overload_name)
      .typed<narrow_copy::schema>();
}

// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
at::Tensor narrow_copy::call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {

    static auto op = create_narrow_copy_typed_handle();
    return op.call(self, dim, start, length);
}

// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
at::Tensor narrow_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {

    static auto op = create_narrow_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, length);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_copy_out, name, "aten::narrow_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(narrow_copy_out, schema_str, "narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)")

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<narrow_copy_out::schema> create_narrow_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(narrow_copy_out::name, narrow_copy_out::overload_name)
      .typed<narrow_copy_out::schema>();
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & narrow_copy_out::call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {

    static auto op = create_narrow_copy_out_typed_handle();
    return op.call(self, dim, start, length, out);
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & narrow_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {

    static auto op = create_narrow_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, length, out);
}
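
// NOTE: Arguments declared SymInt in the schema (such as narrow_copy's
// start and length) arrive here as c10::SymInt, which carries either a
// concrete int64_t or a symbolic value, so the same dispatcher entry
// points serve both eager execution and symbolic-shape tracing.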

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_with_counts, name, "aten::batch_norm_gather_stats_with_counts")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_with_counts, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_with_counts, schema_str, "batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)")

// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_gather_stats_with_counts::schema> create_batch_norm_gather_stats_with_counts_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_gather_stats_with_counts::name, batch_norm_gather_stats_with_counts::overload_name)
      .typed<batch_norm_gather_stats_with_counts::schema>();
}

// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts::call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {

    static auto op = create_batch_norm_gather_stats_with_counts_typed_handle();
    return op.call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
}

// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {

    static auto op = create_batch_norm_gather_stats_with_counts_typed_handle();
    return op.redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pairwise_distance, name, "aten::pairwise_distance")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pairwise_distance, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pairwise_distance, schema_str, "pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor")

// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pairwise_distance::schema> create_pairwise_distance_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pairwise_distance::name, pairwise_distance::overload_name)
      .typed<pairwise_distance::schema>();
}

// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
at::Tensor pairwise_distance::call(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {

    static auto op = create_pairwise_distance_typed_handle();
    return op.call(x1, x2, p, eps, keepdim);
}

// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
at::Tensor pairwise_distance::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {

    static auto op = create_pairwise_distance_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, p, eps, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pdist_backward, name, "aten::_pdist_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pdist_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pdist_backward, schema_str, "_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor")

// aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pdist_backward::schema> create__pdist_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pdist_backward::name, _pdist_backward::overload_name)
      .typed<_pdist_backward::schema>();
}

// aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
at::Tensor _pdist_backward::call(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {

    static auto op = create__pdist_backward_typed_handle();
    return op.call(grad, self, p, pdist);
}

// aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
at::Tensor _pdist_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {

    static auto op = create__pdist_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, p, pdist);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute, name, "aten::permute")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(permute, schema_str, "permute(Tensor(a) self, int[] dims) -> Tensor(a)")

// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<permute::schema> create_permute_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(permute::name, permute::overload_name)
      .typed<permute::schema>();
}

// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
at::Tensor permute::call(const at::Tensor & self, at::IntArrayRef dims) {

    static auto op = create_permute_typed_handle();
    return op.call(self, dims);
}

// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
at::Tensor permute::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {

    static auto op = create_permute_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_H, name, "aten::matrix_H")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_H, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_H, schema_str, "matrix_H(Tensor(a) self) -> Tensor(a)")

// aten::matrix_H(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<matrix_H::schema> create_matrix_H_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matrix_H::name, matrix_H::overload_name)
      .typed<matrix_H::schema>();
}

// aten::matrix_H(Tensor(a) self) -> Tensor(a)
at::Tensor matrix_H::call(const at::Tensor & self) {

    static auto op = create_matrix_H_typed_handle();
    return op.call(self);
}

// aten::matrix_H(Tensor(a) self) -> Tensor(a)
at::Tensor matrix_H::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_matrix_H_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
3979
3980STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_shuffle, name, "aten::pixel_shuffle")
3981STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_shuffle, overload_name, "")
3982STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_shuffle, schema_str, "pixel_shuffle(Tensor self, int upscale_factor) -> Tensor")
3983
3984// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
3985static C10_NOINLINE c10::TypedOperatorHandle<pixel_shuffle::schema> create_pixel_shuffle_typed_handle() {
3986 return c10::Dispatcher::singleton()
3987 .findSchemaOrThrow(pixel_shuffle::name, pixel_shuffle::overload_name)
3988 .typed<pixel_shuffle::schema>();
3989}
3990
3991// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
3992at::Tensor pixel_shuffle::call(const at::Tensor & self, int64_t upscale_factor) {
3993
3994 static auto op = create_pixel_shuffle_typed_handle();
3995 return op.call(self, upscale_factor);
3996}
3997
3998// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
3999at::Tensor pixel_shuffle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor) {
4000
4001 static auto op = create_pixel_shuffle_typed_handle();
4002 return op.redispatch(dispatchKeySet, self, upscale_factor);
4003}
4004
4005STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pinverse, name, "aten::pinverse")
4006STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pinverse, overload_name, "")
4007STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pinverse, schema_str, "pinverse(Tensor self, float rcond=1e-15) -> Tensor")
4008
4009// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
4010static C10_NOINLINE c10::TypedOperatorHandle<pinverse::schema> create_pinverse_typed_handle() {
4011 return c10::Dispatcher::singleton()
4012 .findSchemaOrThrow(pinverse::name, pinverse::overload_name)
4013 .typed<pinverse::schema>();
4014}
4015
4016// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
4017at::Tensor pinverse::call(const at::Tensor & self, double rcond) {
4018
4019 static auto op = create_pinverse_typed_handle();
4020 return op.call(self, rcond);
4021}
4022
4023// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
4024at::Tensor pinverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond) {
4025
4026 static auto op = create_pinverse_typed_handle();
4027 return op.redispatch(dispatchKeySet, self, rcond);
4028}
4029
4030STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reshape, name, "aten::reshape")
4031STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reshape, overload_name, "")
4032STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reshape, schema_str, "reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)")
4033
4034// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
4035static C10_NOINLINE c10::TypedOperatorHandle<reshape::schema> create_reshape_typed_handle() {
4036 return c10::Dispatcher::singleton()
4037 .findSchemaOrThrow(reshape::name, reshape::overload_name)
4038 .typed<reshape::schema>();
4039}
4040
4041// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
4042at::Tensor reshape::call(const at::Tensor & self, c10::SymIntArrayRef shape) {
4043
4044 static auto op = create_reshape_typed_handle();
4045 return op.call(self, shape);
4046}
4047
4048// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
4049at::Tensor reshape::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shape) {
4050
4051 static auto op = create_reshape_typed_handle();
4052 return op.redispatch(dispatchKeySet, self, shape);
4053}
4054
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias, name, "aten::_reshape_alias")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias, schema_str, "_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)")

// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_alias::schema> create__reshape_alias_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_reshape_alias::name, _reshape_alias::overload_name)
      .typed<_reshape_alias::schema>();
}

// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
at::Tensor _reshape_alias::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    static auto op = create__reshape_alias_typed_handle();
    return op.call(self, size, stride);
}

// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
at::Tensor _reshape_alias::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    static auto op = create__reshape_alias_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_Dimname, name, "aten::select")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_Dimname, overload_name, "Dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_Dimname, schema_str, "select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)")

// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<select_Dimname::schema> create_select_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_Dimname::name, select_Dimname::overload_name)
      .typed<select_Dimname::schema>();
}

// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
at::Tensor select_Dimname::call(const at::Tensor & self, at::Dimname dim, int64_t index) {
    static auto op = create_select_Dimname_typed_handle();
    return op.call(self, dim, index);
}

// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
at::Tensor select_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, int64_t index) {
    static auto op = create_select_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_int, name, "aten::select")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_int, schema_str, "select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)")

// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<select_int::schema> create_select_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_int::name, select_int::overload_name)
      .typed<select_int::schema>();
}

// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
at::Tensor select_int::call(const at::Tensor & self, int64_t dim, c10::SymInt index) {
    static auto op = create_select_int_typed_handle();
    return op.call(self, dim, index);
}

// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
at::Tensor select_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index) {
    static auto op = create_select_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}

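// Illustrative usage of the two select overloads (sketch; tensor names are
// hypothetical). select.int takes a positional dim; select.Dimname takes a name:
//   at::Tensor x = at::rand({4, 3});
//   at::Tensor row = at::select(x, /*dim=*/0, /*index=*/1);  // shape [3]
//   // The named-tensor variant passes an at::Dimname in place of the int dim.
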
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu, name, "aten::celu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu, schema_str, "celu(Tensor self, Scalar alpha=1.0) -> Tensor")

// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<celu::schema> create_celu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(celu::name, celu::overload_name)
      .typed<celu::schema>();
}

// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
at::Tensor celu::call(const at::Tensor & self, const at::Scalar & alpha) {
    static auto op = create_celu_typed_handle();
    return op.call(self, alpha);
}

// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
at::Tensor celu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha) {
    static auto op = create_celu_typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu_, name, "aten::celu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu_, schema_str, "celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)")

// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<celu_::schema> create_celu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(celu_::name, celu_::overload_name)
      .typed<celu_::schema>();
}

// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
at::Tensor & celu_::call(at::Tensor & self, const at::Scalar & alpha) {
    static auto op = create_celu__typed_handle();
    return op.call(self, alpha);
}

// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
at::Tensor & celu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha) {
    static auto op = create_celu__typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha);
}

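// Illustrative usage (sketch): celu returns a new tensor while celu_ mutates
// its argument, mirroring the Tensor(a!) annotation in the schema above:
//   at::Tensor x = at::randn({8});
//   at::Tensor y = at::celu(x, /*alpha=*/1.0);  // out-of-place
//   at::celu_(x);                               // in-place on x
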
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu, name, "aten::silu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu, schema_str, "silu(Tensor self) -> Tensor")

// aten::silu(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<silu::schema> create_silu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu::name, silu::overload_name)
      .typed<silu::schema>();
}

// aten::silu(Tensor self) -> Tensor
at::Tensor silu::call(const at::Tensor & self) {
    static auto op = create_silu_typed_handle();
    return op.call(self);
}

// aten::silu(Tensor self) -> Tensor
at::Tensor silu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_silu_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_, name, "aten::silu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_, schema_str, "silu_(Tensor(a!) self) -> Tensor(a!)")

// aten::silu_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<silu_::schema> create_silu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu_::name, silu_::overload_name)
      .typed<silu_::schema>();
}

// aten::silu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & silu_::call(at::Tensor & self) {
    static auto op = create_silu__typed_handle();
    return op.call(self);
}

// aten::silu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & silu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_silu__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_out, name, "aten::silu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(silu_out, schema_str, "silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<silu_out::schema> create_silu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu_out::name, silu_out::overload_name)
      .typed<silu_out::schema>();
}

// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & silu_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_silu_out_typed_handle();
    return op.call(self, out);
}

// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & silu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_silu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

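// Illustrative usage of the three silu entry points (sketch; names are
// hypothetical): functional, in-place, and explicit-out variants:
//   at::Tensor x = at::randn({8});
//   at::Tensor y = at::silu(x);          // functional
//   at::silu_(x);                        // in-place
//   at::Tensor out = at::empty_like(x);
//   at::silu_out(out, x);                // writes into `out`
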
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_backward, name, "aten::mish_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_backward, schema_str, "mish_backward(Tensor grad_output, Tensor self) -> Tensor")

// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mish_backward::schema> create_mish_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mish_backward::name, mish_backward::overload_name)
      .typed<mish_backward::schema>();
}

// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor mish_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    static auto op = create_mish_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor mish_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    static auto op = create_mish_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit, name, "aten::logit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit, schema_str, "logit(Tensor self, float? eps=None) -> Tensor")

// aten::logit(Tensor self, float? eps=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logit::schema> create_logit_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logit::name, logit::overload_name)
      .typed<logit::schema>();
}

// aten::logit(Tensor self, float? eps=None) -> Tensor
at::Tensor logit::call(const at::Tensor & self, c10::optional<double> eps) {
    static auto op = create_logit_typed_handle();
    return op.call(self, eps);
}

// aten::logit(Tensor self, float? eps=None) -> Tensor
at::Tensor logit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps) {
    static auto op = create_logit_typed_handle();
    return op.redispatch(dispatchKeySet, self, eps);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_, name, "aten::logit_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_, schema_str, "logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)")

// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logit_::schema> create_logit__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logit_::name, logit_::overload_name)
      .typed<logit_::schema>();
}

// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
at::Tensor & logit_::call(at::Tensor & self, c10::optional<double> eps) {
    static auto op = create_logit__typed_handle();
    return op.call(self, eps);
}

// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
at::Tensor & logit_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<double> eps) {
    static auto op = create_logit__typed_handle();
    return op.redispatch(dispatchKeySet, self, eps);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_out, name, "aten::logit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_out, schema_str, "logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logit_out::schema> create_logit_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logit_out::name, logit_out::overload_name)
      .typed<logit_out::schema>();
}

// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logit_out::call(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
    static auto op = create_logit_out_typed_handle();
    return op.call(self, eps, out);
}

// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
    static auto op = create_logit_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, eps, out);
}

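// Illustrative usage (sketch): eps is optional; when given, the input is
// clamped to [eps, 1-eps] before the log-odds transform:
//   at::Tensor p = at::rand({5});             // values in [0, 1)
//   at::Tensor z = at::logit(p);              // eps=None
//   at::Tensor z_safe = at::logit(p, 1e-6);   // clamped variant
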
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh, name, "aten::sinh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh, schema_str, "sinh(Tensor self) -> Tensor")

// aten::sinh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sinh::schema> create_sinh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinh::name, sinh::overload_name)
      .typed<sinh::schema>();
}

// aten::sinh(Tensor self) -> Tensor
at::Tensor sinh::call(const at::Tensor & self) {
    static auto op = create_sinh_typed_handle();
    return op.call(self);
}

// aten::sinh(Tensor self) -> Tensor
at::Tensor sinh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_sinh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh_, name, "aten::sinh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh_, schema_str, "sinh_(Tensor(a!) self) -> Tensor(a!)")

// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sinh_::schema> create_sinh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinh_::name, sinh_::overload_name)
      .typed<sinh_::schema>();
}

// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sinh_::call(at::Tensor & self) {
    static auto op = create_sinh__typed_handle();
    return op.call(self);
}

// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sinh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_sinh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh_out, name, "aten::sinh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sinh_out, schema_str, "sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sinh_out::schema> create_sinh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinh_out::name, sinh_out::overload_name)
      .typed<sinh_out::schema>();
}

// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sinh_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_sinh_out_typed_handle();
    return op.call(self, out);
}

// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sinh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_sinh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

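// Illustrative usage (sketch, hypothetical tensors):
//   at::Tensor x = at::randn({4});
//   at::Tensor y = at::sinh(x);  // functional; sinh_ / sinh_out follow the
//                                // same in-place / explicit-out pattern as above
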
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_backward, name, "aten::slice_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_backward, schema_str, "slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor")

// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slice_backward::schema> create_slice_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_backward::name, slice_backward::overload_name)
      .typed<slice_backward::schema>();
}

// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
at::Tensor slice_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
    static auto op = create_slice_backward_typed_handle();
    return op.call(grad_output, input_sizes, dim, start, end, step);
}

// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
at::Tensor slice_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
    static auto op = create_slice_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_int, name, "aten::softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_int, schema_str, "softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor")

// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<softmax_int::schema> create_softmax_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softmax_int::name, softmax_int::overload_name)
      .typed<softmax_int::schema>();
}

// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor softmax_int::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_softmax_int_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor softmax_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_softmax_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_int_out, name, "aten::softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_int_out, overload_name, "int_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_int_out, schema_str, "softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<softmax_int_out::schema> create_softmax_int_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softmax_int_out::name, softmax_int_out::overload_name)
      .typed<softmax_int_out::schema>();
}

// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softmax_int_out::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto op = create_softmax_int_out_typed_handle();
    return op.call(self, dim, dtype, out);
}

// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softmax_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto op = create_softmax_int_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_Dimname, name, "aten::softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_Dimname, overload_name, "Dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softmax_Dimname, schema_str, "softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor")

// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<softmax_Dimname::schema> create_softmax_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softmax_Dimname::name, softmax_Dimname::overload_name)
      .typed<softmax_Dimname::schema>();
}

// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor softmax_Dimname::call(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_softmax_Dimname_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor softmax_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    static auto op = create_softmax_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax, name, "aten::_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax, schema_str, "_softmax(Tensor self, int dim, bool half_to_float) -> Tensor")

// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_softmax::schema> create__softmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_softmax::name, _softmax::overload_name)
      .typed<_softmax::schema>();
}

// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _softmax::call(const at::Tensor & self, int64_t dim, bool half_to_float) {
    static auto op = create__softmax_typed_handle();
    return op.call(self, dim, half_to_float);
}

// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
    static auto op = create__softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, half_to_float);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_out, name, "aten::_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_out, schema_str, "_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_softmax_out::schema> create__softmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_softmax_out::name, _softmax_out::overload_name)
      .typed<_softmax_out::schema>();
}

// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _softmax_out::call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    static auto op = create__softmax_out_typed_handle();
    return op.call(self, dim, half_to_float, out);
}

// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    static auto op = create__softmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, half_to_float, out);
}

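// Illustrative usage (sketch): the public softmax.int overload is implemented
// in terms of the internal _softmax kernel; dtype can widen the computation:
//   at::Tensor logits = at::randn({2, 5});
//   at::Tensor probs   = at::softmax(logits, /*dim=*/-1);
//   at::Tensor probs64 = at::softmax(logits, -1, at::kDouble);
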
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_Tensor, name, "aten::unsafe_split")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_Tensor, schema_str, "unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]")

// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_split_Tensor::schema> create_unsafe_split_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_split_Tensor::name, unsafe_split_Tensor::overload_name)
      .typed<unsafe_split_Tensor::schema>();
}

// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_split_Tensor::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    static auto op = create_unsafe_split_Tensor_typed_handle();
    return op.call(self, split_size, dim);
}

// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_split_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    static auto op = create_unsafe_split_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dsplit_int, name, "aten::dsplit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dsplit_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dsplit_int, schema_str, "dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]")

// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<dsplit_int::schema> create_dsplit_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dsplit_int::name, dsplit_int::overload_name)
      .typed<dsplit_int::schema>();
}

// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> dsplit_int::call(const at::Tensor & self, int64_t sections) {
    static auto op = create_dsplit_int_typed_handle();
    return op.call(self, sections);
}

// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> dsplit_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {
    static auto op = create_dsplit_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, sections);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dsplit_array, name, "aten::dsplit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dsplit_array, overload_name, "array")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dsplit_array, schema_str, "dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]")

// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<dsplit_array::schema> create_dsplit_array_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dsplit_array::name, dsplit_array::overload_name)
      .typed<dsplit_array::schema>();
}

// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> dsplit_array::call(const at::Tensor & self, at::IntArrayRef indices) {
    static auto op = create_dsplit_array_typed_handle();
    return op.call(self, indices);
}

// aten::dsplit.array(Tensor(a -> *) self, at::IntArrayRef indices) -> Tensor(a)[]
::std::vector<at::Tensor> dsplit_array::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {
    static auto op = create_dsplit_array_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices);
}

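// Illustrative usage (sketch): dsplit splits along dim 2 and returns views;
// the int overload takes a section count, the array overload split points:
//   at::Tensor x = at::rand({2, 2, 4});
//   std::vector<at::Tensor> halves = at::dsplit(x, /*sections=*/2);
//   std::vector<at::Tensor> parts  = at::dsplit(x, /*indices=*/{1, 3});
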
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vstack, name, "aten::vstack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vstack, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vstack, schema_str, "vstack(Tensor[] tensors) -> Tensor")

// aten::vstack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<vstack::schema> create_vstack_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vstack::name, vstack::overload_name)
      .typed<vstack::schema>();
}

// aten::vstack(Tensor[] tensors) -> Tensor
at::Tensor vstack::call(at::TensorList tensors) {
    static auto op = create_vstack_typed_handle();
    return op.call(tensors);
}

// aten::vstack(Tensor[] tensors) -> Tensor
at::Tensor vstack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    static auto op = create_vstack_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vstack_out, name, "aten::vstack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vstack_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vstack_out, schema_str, "vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)")

// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<vstack_out::schema> create_vstack_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vstack_out::name, vstack_out::overload_name)
      .typed<vstack_out::schema>();
}

// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & vstack_out::call(at::TensorList tensors, at::Tensor & out) {
    static auto op = create_vstack_out_typed_handle();
    return op.call(tensors, out);
}

// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & vstack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    static auto op = create_vstack_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

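// Illustrative usage (sketch): vstack takes a TensorList and stacks row-wise;
// the out variant writes into a preallocated tensor instead:
//   at::Tensor a = at::rand({3});
//   at::Tensor b = at::rand({3});
//   at::Tensor v = at::vstack({a, b});  // shape [2, 3]
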
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stft, name, "aten::stft")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stft, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stft, schema_str, "stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor")

// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<stft::schema> create_stft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(stft::name, stft::overload_name)
      .typed<stft::schema>();
}

// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
at::Tensor stft::call(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
    static auto op = create_stft_typed_handle();
    return op.call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
}

// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
at::Tensor stft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
    static auto op = create_stft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stft_center, name, "aten::stft")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stft_center, overload_name, "center")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stft_center, schema_str, "stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor")

// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<stft_center::schema> create_stft_center_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(stft_center::name, stft_center::overload_name)
      .typed<stft_center::schema>();
}

// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
at::Tensor stft_center::call(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
    static auto op = create_stft_center_typed_handle();
    return op.call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
}

// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
at::Tensor stft_center::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
    static auto op = create_stft_center_typed_handle();
    return op.redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
}

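// Illustrative usage (sketch; argument values are hypothetical). The center
// overload additionally pads the signal before framing:
//   at::Tensor signal = at::randn({16000});
//   at::Tensor window = at::hann_window(400);
//   at::Tensor spec = at::stft(signal, /*n_fft=*/400, /*hop_length=*/160,
//                              /*win_length=*/400, window, /*normalized=*/false,
//                              /*onesided=*/true, /*return_complex=*/true);
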
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_sum_backward, name, "aten::_nested_sum_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_sum_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_sum_backward, schema_str, "_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor")

// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_sum_backward::schema> create__nested_sum_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_sum_backward::name, _nested_sum_backward::overload_name)
      .typed<_nested_sum_backward::schema>();
}

// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
at::Tensor _nested_sum_backward::call(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    static auto op = create__nested_sum_backward_typed_handle();
    return op.call(grad, self, dim, keepdim);
}

// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
at::Tensor _nested_sum_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    static auto op = create__nested_sum_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sum_to_size, name, "aten::sum_to_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sum_to_size, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sum_to_size, schema_str, "sum_to_size(Tensor self, int[] size) -> Tensor")

// aten::sum_to_size(Tensor self, int[] size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sum_to_size::schema> create_sum_to_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sum_to_size::name, sum_to_size::overload_name)
      .typed<sum_to_size::schema>();
}

// aten::sum_to_size(Tensor self, int[] size) -> Tensor
at::Tensor sum_to_size::call(const at::Tensor & self, at::IntArrayRef size) {
    static auto op = create_sum_to_size_typed_handle();
    return op.call(self, size);
}

// aten::sum_to_size(Tensor self, int[] size) -> Tensor
at::Tensor sum_to_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
    static auto op = create_sum_to_size_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

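// Illustrative usage (sketch): sum_to_size reduces broadcasted dimensions
// back down to a target shape; it is exposed as a Tensor method:
//   at::Tensor g = at::rand({4, 3});
//   at::Tensor reduced = g.sum_to_size({1, 3});  // sums over dim 0
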
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt, name, "aten::sqrt")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt, schema_str, "sqrt(Tensor self) -> Tensor")

// aten::sqrt(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sqrt::schema> create_sqrt_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sqrt::name, sqrt::overload_name)
      .typed<sqrt::schema>();
}

// aten::sqrt(Tensor self) -> Tensor
at::Tensor sqrt::call(const at::Tensor & self) {
    static auto op = create_sqrt_typed_handle();
    return op.call(self);
}

// aten::sqrt(Tensor self) -> Tensor
at::Tensor sqrt::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_sqrt_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt_, name, "aten::sqrt_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt_, schema_str, "sqrt_(Tensor(a!) self) -> Tensor(a!)")

// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sqrt_::schema> create_sqrt__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sqrt_::name, sqrt_::overload_name)
      .typed<sqrt_::schema>();
}

// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sqrt_::call(at::Tensor & self) {
    static auto op = create_sqrt__typed_handle();
    return op.call(self);
}

// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sqrt_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_sqrt__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt_out, name, "aten::sqrt")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sqrt_out, schema_str, "sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sqrt_out::schema> create_sqrt_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sqrt_out::name, sqrt_out::overload_name)
      .typed<sqrt_out::schema>();
}

// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sqrt_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_sqrt_out_typed_handle();
    return op.call(self, out);
}

// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sqrt_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_sqrt_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

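// Illustrative usage (sketch, hypothetical tensors):
//   at::Tensor x = at::rand({4});
//   at::Tensor y = at::sqrt(x);  // sqrt_ / sqrt_out follow the same
//                                // in-place / explicit-out pattern as above
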
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std, name, "aten::std")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std, schema_str, "std(Tensor self, bool unbiased=True) -> Tensor")

// aten::std(Tensor self, bool unbiased=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<std::schema> create_std_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std::name, std::overload_name)
      .typed<std::schema>();
}

// aten::std(Tensor self, bool unbiased=True) -> Tensor
at::Tensor std::call(const at::Tensor & self, bool unbiased) {
    static auto op = create_std_typed_handle();
    return op.call(self, unbiased);
}

// aten::std(Tensor self, bool unbiased=True) -> Tensor
at::Tensor std::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) {
    static auto op = create_std_typed_handle();
    return op.redispatch(dispatchKeySet, self, unbiased);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_dim, name, "aten::std")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_dim, schema_str, "std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor")

// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<std_dim::schema> create_std_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_dim::name, std_dim::overload_name)
      .typed<std_dim::schema>();
}

// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor std_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    static auto op = create_std_dim_typed_handle();
    return op.call(self, dim, unbiased, keepdim);
}

// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor std_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    static auto op = create_std_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction, name, "aten::std")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction, overload_name, "correction")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction, schema_str, "std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor")

// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<std_correction::schema> create_std_correction_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_correction::name, std_correction::overload_name)
      .typed<std_correction::schema>();
}

// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
at::Tensor std_correction::call(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
    static auto op = create_std_correction_typed_handle();
    return op.call(self, dim, correction, keepdim);
}

// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
at::Tensor std_correction::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
    static auto op = create_std_correction_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

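// Illustrative usage of the three std overloads (sketch; note that in this
// version of the file the correction argument is an int?, not a Scalar?):
//   at::Tensor x = at::randn({4, 5});
//   at::Tensor s_all = at::std(x);  // whole tensor, unbiased
//   at::Tensor s_dim = at::std(x, /*dim=*/{1}, /*unbiased=*/true, /*keepdim=*/false);
//   at::Tensor s_cor = at::std(x, /*dim=*/{1},
//                              /*correction=*/c10::optional<int64_t>(0),
//                              /*keepdim=*/false);
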
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean, name, "aten::std_mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean, schema_str, "std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)")

// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean::schema> create_std_mean_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean::name, std_mean::overload_name)
      .typed<std_mean::schema>();
}

// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean::call(const at::Tensor & self, bool unbiased) {
    static auto op = create_std_mean_typed_handle();
    return op.call(self, unbiased);
}

// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) {
    static auto op = create_std_mean_typed_handle();
    return op.redispatch(dispatchKeySet, self, unbiased);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_dim, name, "aten::std_mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_dim, schema_str, "std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)")

// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_dim::schema> create_std_mean_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_dim::name, std_mean_dim::overload_name)
      .typed<std_mean_dim::schema>();
}

// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    static auto op = create_std_mean_dim_typed_handle();
    return op.call(self, dim, unbiased, keepdim);
}

// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    static auto op = create_std_mean_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction, name, "aten::std_mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction, overload_name, "correction")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction, schema_str, "std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)")

// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_correction::schema> create_std_mean_correction_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_correction::name, std_mean_correction::overload_name)
      .typed<std_mean_correction::schema>();
}

// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_correction::call(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
    static auto op = create_std_mean_correction_typed_handle();
    return op.call(self, dim, correction, keepdim);
}

// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_correction::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
    static auto op = create_std_mean_correction_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_names_dim, name, "aten::std_mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_names_dim, overload_name, "names_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_names_dim, schema_str, "std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)")

// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_names_dim::schema> create_std_mean_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_names_dim::name, std_mean_names_dim::overload_name)
      .typed<std_mean_names_dim::schema>();
}

// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    static auto op = create_std_mean_names_dim_typed_handle();
    return op.call(self, dim, unbiased, keepdim);
}

// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    static auto op = create_std_mean_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction_names, name, "aten::std_mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction_names, overload_name, "correction_names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction_names, schema_str, "std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)")

// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_correction_names::schema> create_std_mean_correction_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_correction_names::name, std_mean_correction_names::overload_name)
      .typed<std_mean_correction_names::schema>();
}

// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names::call(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    static auto op = create_std_mean_correction_names_typed_handle();
    return op.call(self, dim, correction, keepdim);
}

// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    static auto op = create_std_mean_correction_names_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

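// Illustrative usage (sketch): std_mean fuses both reductions into a single
// dispatch and returns a (std, mean) tuple:
//   at::Tensor x = at::randn({4, 5});
//   auto [s, m] = at::std_mean(x);  // structured bindings, C++17
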
5080STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_out, name, "aten::std")
5081STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_out, overload_name, "out")
5082STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_out, schema_str, "std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
5083
5084// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
5085static C10_NOINLINE c10::TypedOperatorHandle<std_out::schema> create_std_out_typed_handle() {
5086 return c10::Dispatcher::singleton()
5087 .findSchemaOrThrow(std_out::name, std_out::overload_name)
5088 .typed<std_out::schema>();
5089}
5090
5091// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
5092at::Tensor & std_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
5093
5094 static auto op = create_std_out_typed_handle();
5095 return op.call(self, dim, unbiased, keepdim, out);
5096}
5097
5098// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
5099at::Tensor & std_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
5100
5101 static auto op = create_std_out_typed_handle();
5102 return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
5103}
5104
5105STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_out, name, "aten::std")
5106STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_out, overload_name, "correction_out")
5107STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_out, schema_str, "std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)")
5108
5109// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
5110static C10_NOINLINE c10::TypedOperatorHandle<std_correction_out::schema> create_std_correction_out_typed_handle() {
5111 return c10::Dispatcher::singleton()
5112 .findSchemaOrThrow(std_correction_out::name, std_correction_out::overload_name)
5113 .typed<std_correction_out::schema>();
5114}
5115
5116// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
5117at::Tensor & std_correction_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
5118
5119 static auto op = create_std_correction_out_typed_handle();
5120 return op.call(self, dim, correction, keepdim, out);
5121}
5122
5123// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
5124at::Tensor & std_correction_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
5125
5126 static auto op = create_std_correction_out_typed_handle();
5127 return op.redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
5128}
5129
5130STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_names_dim, name, "aten::std")
5131STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_names_dim, overload_name, "names_dim")
5132STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_names_dim, schema_str, "std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor")
5133
5134// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
5135static C10_NOINLINE c10::TypedOperatorHandle<std_names_dim::schema> create_std_names_dim_typed_handle() {
5136 return c10::Dispatcher::singleton()
5137 .findSchemaOrThrow(std_names_dim::name, std_names_dim::overload_name)
5138 .typed<std_names_dim::schema>();
5139}
5140
5141// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
5142at::Tensor std_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
5143
5144 static auto op = create_std_names_dim_typed_handle();
5145 return op.call(self, dim, unbiased, keepdim);
5146}
5147
5148// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
5149at::Tensor std_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
5150
5151 static auto op = create_std_names_dim_typed_handle();
5152 return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
5153}
5154
5155STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_names_out, name, "aten::std")
5156STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_names_out, overload_name, "names_out")
5157STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_names_out, schema_str, "std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
5158
5159// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
5160static C10_NOINLINE c10::TypedOperatorHandle<std_names_out::schema> create_std_names_out_typed_handle() {
5161 return c10::Dispatcher::singleton()
5162 .findSchemaOrThrow(std_names_out::name, std_names_out::overload_name)
5163 .typed<std_names_out::schema>();
5164}
5165
5166// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
5167at::Tensor & std_names_out::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
5168
5169 static auto op = create_std_names_out_typed_handle();
5170 return op.call(self, dim, unbiased, keepdim, out);
5171}
5172
5173// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
5174at::Tensor & std_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
5175
5176 static auto op = create_std_names_out_typed_handle();
5177 return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
5178}
5179
5180STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_names, name, "aten::std")
5181STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_names, overload_name, "correction_names")
5182STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_names, schema_str, "std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor")
5183
5184// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
5185static C10_NOINLINE c10::TypedOperatorHandle<std_correction_names::schema> create_std_correction_names_typed_handle() {
5186 return c10::Dispatcher::singleton()
5187 .findSchemaOrThrow(std_correction_names::name, std_correction_names::overload_name)
5188 .typed<std_correction_names::schema>();
5189}
5190
5191// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
5192at::Tensor std_correction_names::call(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
5193
5194 static auto op = create_std_correction_names_typed_handle();
5195 return op.call(self, dim, correction, keepdim);
5196}
5197
5198// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
5199at::Tensor std_correction_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
5200
5201 static auto op = create_std_correction_names_typed_handle();
5202 return op.redispatch(dispatchKeySet, self, dim, correction, keepdim);
5203}
5204
5205STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_names_out, name, "aten::std")
5206STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_names_out, overload_name, "correction_names_out")
5207STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_correction_names_out, schema_str, "std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)")
5208
5209// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
5210static C10_NOINLINE c10::TypedOperatorHandle<std_correction_names_out::schema> create_std_correction_names_out_typed_handle() {
5211 return c10::Dispatcher::singleton()
5212 .findSchemaOrThrow(std_correction_names_out::name, std_correction_names_out::overload_name)
5213 .typed<std_correction_names_out::schema>();
5214}
5215
5216// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
5217at::Tensor & std_correction_names_out::call(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
5218
5219 static auto op = create_std_correction_names_out_typed_handle();
5220 return op.call(self, dim, correction, keepdim, out);
5221}
5222
5223// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
5224at::Tensor & std_correction_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
5225
5226 static auto op = create_std_correction_names_out_typed_handle();
5227 return op.redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
5228}
5229
5230STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t, name, "aten::t")
5231STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t, overload_name, "")
5232STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t, schema_str, "t(Tensor(a) self) -> Tensor(a)")
5233
5234// aten::t(Tensor(a) self) -> Tensor(a)
5235static C10_NOINLINE c10::TypedOperatorHandle<t::schema> create_t_typed_handle() {
5236 return c10::Dispatcher::singleton()
5237 .findSchemaOrThrow(t::name, t::overload_name)
5238 .typed<t::schema>();
5239}
5240
5241// aten::t(Tensor(a) self) -> Tensor(a)
5242at::Tensor t::call(const at::Tensor & self) {
5243
5244 static auto op = create_t_typed_handle();
5245 return op.call(self);
5246}
5247
5248// aten::t(Tensor(a) self) -> Tensor(a)
5249at::Tensor t::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
5250
5251 static auto op = create_t_typed_handle();
5252 return op.redispatch(dispatchKeySet, self);
5253}
5254
5255STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_, name, "aten::t_")
5256STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_, overload_name, "")
5257STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(t_, schema_str, "t_(Tensor(a!) self) -> Tensor(a!)")
5258
5259// aten::t_(Tensor(a!) self) -> Tensor(a!)
5260static C10_NOINLINE c10::TypedOperatorHandle<t_::schema> create_t__typed_handle() {
5261 return c10::Dispatcher::singleton()
5262 .findSchemaOrThrow(t_::name, t_::overload_name)
5263 .typed<t_::schema>();
5264}
5265
5266// aten::t_(Tensor(a!) self) -> Tensor(a!)
5267at::Tensor & t_::call(at::Tensor & self) {
5268
5269 static auto op = create_t__typed_handle();
5270 return op.call(self);
5271}
5272
5273// aten::t_(Tensor(a!) self) -> Tensor(a!)
5274at::Tensor & t_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
5275
5276 static auto op = create_t__typed_handle();
5277 return op.redispatch(dispatchKeySet, self);
5278}
5279
5280STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold, name, "aten::threshold")
5281STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold, overload_name, "")
5282STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold, schema_str, "threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor")
5283
5284// aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
5285static C10_NOINLINE c10::TypedOperatorHandle<threshold::schema> create_threshold_typed_handle() {
5286 return c10::Dispatcher::singleton()
5287 .findSchemaOrThrow(threshold::name, threshold::overload_name)
5288 .typed<threshold::schema>();
5289}
5290
5291// aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
5292at::Tensor threshold::call(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
5293
5294 static auto op = create_threshold_typed_handle();
5295 return op.call(self, threshold, value);
5296}
5297
5298// aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
5299at::Tensor threshold::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
5300
5301 static auto op = create_threshold_typed_handle();
5302 return op.redispatch(dispatchKeySet, self, threshold, value);
5303}
5304
5305STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_, name, "aten::threshold_")
5306STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_, overload_name, "")
5307STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_, schema_str, "threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)")
5308
5309// aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
5310static C10_NOINLINE c10::TypedOperatorHandle<threshold_::schema> create_threshold__typed_handle() {
5311 return c10::Dispatcher::singleton()
5312 .findSchemaOrThrow(threshold_::name, threshold_::overload_name)
5313 .typed<threshold_::schema>();
5314}
5315
5316// aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
5317at::Tensor & threshold_::call(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
5318
5319 static auto op = create_threshold__typed_handle();
5320 return op.call(self, threshold, value);
5321}
5322
5323// aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
5324at::Tensor & threshold_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
5325
5326 static auto op = create_threshold__typed_handle();
5327 return op.redispatch(dispatchKeySet, self, threshold, value);
5328}
5329
5330STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_out, name, "aten::threshold")
5331STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_out, overload_name, "out")
5332STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(threshold_out, schema_str, "threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)")
5333
5334// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
5335static C10_NOINLINE c10::TypedOperatorHandle<threshold_out::schema> create_threshold_out_typed_handle() {
5336 return c10::Dispatcher::singleton()
5337 .findSchemaOrThrow(threshold_out::name, threshold_out::overload_name)
5338 .typed<threshold_out::schema>();
5339}
5340
5341// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
5342at::Tensor & threshold_out::call(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
5343
5344 static auto op = create_threshold_out_typed_handle();
5345 return op.call(self, threshold, value, out);
5346}
5347
5348// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
5349at::Tensor & threshold_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
5350
5351 static auto op = create_threshold_out_typed_handle();
5352 return op.redispatch(dispatchKeySet, self, threshold, value, out);
5353}
5354
5355STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_int, name, "aten::transpose")
5356STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_int, overload_name, "int")
5357STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_int, schema_str, "transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)")
5358
5359// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
5360static C10_NOINLINE c10::TypedOperatorHandle<transpose_int::schema> create_transpose_int_typed_handle() {
5361 return c10::Dispatcher::singleton()
5362 .findSchemaOrThrow(transpose_int::name, transpose_int::overload_name)
5363 .typed<transpose_int::schema>();
5364}
5365
5366// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
5367at::Tensor transpose_int::call(const at::Tensor & self, int64_t dim0, int64_t dim1) {
5368
5369 static auto op = create_transpose_int_typed_handle();
5370 return op.call(self, dim0, dim1);
5371}
5372
5373// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
5374at::Tensor transpose_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
5375
5376 static auto op = create_transpose_int_typed_handle();
5377 return op.redispatch(dispatchKeySet, self, dim0, dim1);
5378}
5379
5380STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_Dimname, name, "aten::transpose")
5381STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_Dimname, overload_name, "Dimname")
5382STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_Dimname, schema_str, "transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)")
5383
5384// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
5385static C10_NOINLINE c10::TypedOperatorHandle<transpose_Dimname::schema> create_transpose_Dimname_typed_handle() {
5386 return c10::Dispatcher::singleton()
5387 .findSchemaOrThrow(transpose_Dimname::name, transpose_Dimname::overload_name)
5388 .typed<transpose_Dimname::schema>();
5389}
5390
5391// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
5392at::Tensor transpose_Dimname::call(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
5393
5394 static auto op = create_transpose_Dimname_typed_handle();
5395 return op.call(self, dim0, dim1);
5396}
5397
5398// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
5399at::Tensor transpose_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
5400
5401 static auto op = create_transpose_Dimname_typed_handle();
5402 return op.redispatch(dispatchKeySet, self, dim0, dim1);
5403}
5404
5405STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_, name, "aten::transpose_")
5406STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_, overload_name, "")
5407STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(transpose_, schema_str, "transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)")
5408
5409// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
5410static C10_NOINLINE c10::TypedOperatorHandle<transpose_::schema> create_transpose__typed_handle() {
5411 return c10::Dispatcher::singleton()
5412 .findSchemaOrThrow(transpose_::name, transpose_::overload_name)
5413 .typed<transpose_::schema>();
5414}
5415
5416// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
5417at::Tensor & transpose_::call(at::Tensor & self, int64_t dim0, int64_t dim1) {
5418
5419 static auto op = create_transpose__typed_handle();
5420 return op.call(self, dim0, dim1);
5421}
5422
5423// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
5424at::Tensor & transpose_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
5425
5426 static auto op = create_transpose__typed_handle();
5427 return op.redispatch(dispatchKeySet, self, dim0, dim1);
5428}
5429
5430STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flip, name, "aten::flip")
5431STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flip, overload_name, "")
5432STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flip, schema_str, "flip(Tensor self, int[] dims) -> Tensor")
5433
5434// aten::flip(Tensor self, int[] dims) -> Tensor
5435static C10_NOINLINE c10::TypedOperatorHandle<flip::schema> create_flip_typed_handle() {
5436 return c10::Dispatcher::singleton()
5437 .findSchemaOrThrow(flip::name, flip::overload_name)
5438 .typed<flip::schema>();
5439}
5440
5441// aten::flip(Tensor self, int[] dims) -> Tensor
5442at::Tensor flip::call(const at::Tensor & self, at::IntArrayRef dims) {
5443
5444 static auto op = create_flip_typed_handle();
5445 return op.call(self, dims);
5446}
5447
5448// aten::flip(Tensor self, int[] dims) -> Tensor
5449at::Tensor flip::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
5450
5451 static auto op = create_flip_typed_handle();
5452 return op.redispatch(dispatchKeySet, self, dims);
5453}
5454
5455STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(roll, name, "aten::roll")
5456STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(roll, overload_name, "")
5457STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(roll, schema_str, "roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor")
5458
5459// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
5460static C10_NOINLINE c10::TypedOperatorHandle<roll::schema> create_roll_typed_handle() {
5461 return c10::Dispatcher::singleton()
5462 .findSchemaOrThrow(roll::name, roll::overload_name)
5463 .typed<roll::schema>();
5464}
5465
5466// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
5467at::Tensor roll::call(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
5468
5469 static auto op = create_roll_typed_handle();
5470 return op.call(self, shifts, dims);
5471}
5472
5473// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
5474at::Tensor roll::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
5475
5476 static auto op = create_roll_typed_handle();
5477 return op.redispatch(dispatchKeySet, self, shifts, dims);
5478}
5479
5480STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded, name, "aten::_nested_from_padded")
5481STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded, overload_name, "")
5482STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded, schema_str, "_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor")
5483
5484// aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
5485static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded::schema> create__nested_from_padded_typed_handle() {
5486 return c10::Dispatcher::singleton()
5487 .findSchemaOrThrow(_nested_from_padded::name, _nested_from_padded::overload_name)
5488 .typed<_nested_from_padded::schema>();
5489}
5490
5491// aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
5492at::Tensor _nested_from_padded::call(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
5493
5494 static auto op = create__nested_from_padded_typed_handle();
5495 return op.call(padded, cpu_nested_shape_example, fuse_transform_0213);
5496}
5497
5498// aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
5499at::Tensor _nested_from_padded::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
5500
5501 static auto op = create__nested_from_padded_typed_handle();
5502 return op.redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213);
5503}
5504
5505STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer, name, "aten::_nested_view_from_buffer")
5506STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer, overload_name, "")
5507STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_view_from_buffer, schema_str, "_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)")
5508
5509// aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
5510static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_buffer::schema> create__nested_view_from_buffer_typed_handle() {
5511 return c10::Dispatcher::singleton()
5512 .findSchemaOrThrow(_nested_view_from_buffer::name, _nested_view_from_buffer::overload_name)
5513 .typed<_nested_view_from_buffer::schema>();
5514}
5515
5516// aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
5517at::Tensor _nested_view_from_buffer::call(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
5518
5519 static auto op = create__nested_view_from_buffer_typed_handle();
5520 return op.call(self, nested_size, nested_strides, offsets);
5521}
5522
5523// aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
5524at::Tensor _nested_view_from_buffer::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
5525
5526 static auto op = create__nested_view_from_buffer_typed_handle();
5527 return op.redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets);
5528}
5529
5530STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_trilinear, name, "aten::_trilinear")
5531STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_trilinear, overload_name, "")
5532STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_trilinear, schema_str, "_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor")
5533
5534// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
5535static C10_NOINLINE c10::TypedOperatorHandle<_trilinear::schema> create__trilinear_typed_handle() {
5536 return c10::Dispatcher::singleton()
5537 .findSchemaOrThrow(_trilinear::name, _trilinear::overload_name)
5538 .typed<_trilinear::schema>();
5539}
5540
5541// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
5542at::Tensor _trilinear::call(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
5543
5544 static auto op = create__trilinear_typed_handle();
5545 return op.call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
5546}
5547
5548// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
5549at::Tensor _trilinear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
5550
5551 static auto op = create__trilinear_typed_handle();
5552 return op.redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
5553}
5554
5555STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(type_as, name, "aten::type_as")
5556STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(type_as, overload_name, "")
5557STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(type_as, schema_str, "type_as(Tensor self, Tensor other) -> Tensor")
5558
5559// aten::type_as(Tensor self, Tensor other) -> Tensor
5560static C10_NOINLINE c10::TypedOperatorHandle<type_as::schema> create_type_as_typed_handle() {
5561 return c10::Dispatcher::singleton()
5562 .findSchemaOrThrow(type_as::name, type_as::overload_name)
5563 .typed<type_as::schema>();
5564}
5565
5566// aten::type_as(Tensor self, Tensor other) -> Tensor
5567at::Tensor type_as::call(const at::Tensor & self, const at::Tensor & other) {
5568
5569 static auto op = create_type_as_typed_handle();
5570 return op.call(self, other);
5571}
5572
5573// aten::type_as(Tensor self, Tensor other) -> Tensor
5574at::Tensor type_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
5575
5576 static auto op = create_type_as_typed_handle();
5577 return op.redispatch(dispatchKeySet, self, other);
5578}
5579
5580STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_has_compatible_shallow_copy_type, name, "aten::_has_compatible_shallow_copy_type")
5581STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_has_compatible_shallow_copy_type, overload_name, "")
5582STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_has_compatible_shallow_copy_type, schema_str, "_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool")
5583
5584// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
5585static C10_NOINLINE c10::TypedOperatorHandle<_has_compatible_shallow_copy_type::schema> create__has_compatible_shallow_copy_type_typed_handle() {
5586 return c10::Dispatcher::singleton()
5587 .findSchemaOrThrow(_has_compatible_shallow_copy_type::name, _has_compatible_shallow_copy_type::overload_name)
5588 .typed<_has_compatible_shallow_copy_type::schema>();
5589}
5590
5591// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
5592bool _has_compatible_shallow_copy_type::call(const at::Tensor & self, const at::Tensor & from) {
5593
5594 static auto op = create__has_compatible_shallow_copy_type_typed_handle();
5595 return op.call(self, from);
5596}
5597
5598// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
5599bool _has_compatible_shallow_copy_type::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & from) {
5600
5601 static auto op = create__has_compatible_shallow_copy_type_typed_handle();
5602 return op.redispatch(dispatchKeySet, self, from);
5603}
5604
5605STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique2, name, "aten::_unique2")
5606STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique2, overload_name, "")
5607STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique2, schema_str, "_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)")
5608
5609// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
5610static C10_NOINLINE c10::TypedOperatorHandle<_unique2::schema> create__unique2_typed_handle() {
5611 return c10::Dispatcher::singleton()
5612 .findSchemaOrThrow(_unique2::name, _unique2::overload_name)
5613 .typed<_unique2::schema>();
5614}
5615
5616// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
5617::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2::call(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
5618
5619 static auto op = create__unique2_typed_handle();
5620 return op.call(self, sorted, return_inverse, return_counts);
5621}
5622
5623// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
5624::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
5625
5626 static auto op = create__unique2_typed_handle();
5627 return op.redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts);
5628}
5629
5630STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_backward, name, "aten::_weight_norm_interface_backward")
5631STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_backward, overload_name, "")
5632STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_backward, schema_str, "_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)")
5633
5634// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
5635static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_interface_backward::schema> create__weight_norm_interface_backward_typed_handle() {
5636 return c10::Dispatcher::singleton()
5637 .findSchemaOrThrow(_weight_norm_interface_backward::name, _weight_norm_interface_backward::overload_name)
5638 .typed<_weight_norm_interface_backward::schema>();
5639}
5640
5641// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
5642::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward::call(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
5643
5644 static auto op = create__weight_norm_interface_backward_typed_handle();
5645 return op.call(grad_w, saved_v, saved_g, saved_norms, dim);
5646}
5647
5648// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
5649::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
5650
5651 static auto op = create__weight_norm_interface_backward_typed_handle();
5652 return op.redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim);
5653}
5654
5655STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_like, name, "aten::zeros_like")
5656STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_like, overload_name, "")
5657STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_like, schema_str, "zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")
5658
5659// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
5660static C10_NOINLINE c10::TypedOperatorHandle<zeros_like::schema> create_zeros_like_typed_handle() {
5661 return c10::Dispatcher::singleton()
5662 .findSchemaOrThrow(zeros_like::name, zeros_like::overload_name)
5663 .typed<zeros_like::schema>();
5664}
5665
5666// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
5667at::Tensor zeros_like::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5668
5669 static auto op = create_zeros_like_typed_handle();
5670 return op.call(self, dtype, layout, device, pin_memory, memory_format);
5671}
5672
5673// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
5674at::Tensor zeros_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5675
5676 static auto op = create_zeros_like_typed_handle();
5677 return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
5678}
5679
5680STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_prod_dim_dtype, name, "aten::_sparse_csr_prod")
5681STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_prod_dim_dtype, overload_name, "dim_dtype")
5682STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_prod_dim_dtype, schema_str, "_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
5683
5684// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
5685static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_prod_dim_dtype::schema> create__sparse_csr_prod_dim_dtype_typed_handle() {
5686 return c10::Dispatcher::singleton()
5687 .findSchemaOrThrow(_sparse_csr_prod_dim_dtype::name, _sparse_csr_prod_dim_dtype::overload_name)
5688 .typed<_sparse_csr_prod_dim_dtype::schema>();
5689}
5690
5691// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
5692at::Tensor _sparse_csr_prod_dim_dtype::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
5693
5694 static auto op = create__sparse_csr_prod_dim_dtype_typed_handle();
5695 return op.call(self, dim, keepdim, dtype);
5696}
5697
5698// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
5699at::Tensor _sparse_csr_prod_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
5700
5701 static auto op = create__sparse_csr_prod_dim_dtype_typed_handle();
5702 return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
5703}
5704
5705STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_softmax_backward_data, name, "aten::_sparse_softmax_backward_data")
5706STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_softmax_backward_data, overload_name, "")
5707STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_softmax_backward_data, schema_str, "_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor")
5708
5709// aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
5710static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_backward_data::schema> create__sparse_softmax_backward_data_typed_handle() {
5711 return c10::Dispatcher::singleton()
5712 .findSchemaOrThrow(_sparse_softmax_backward_data::name, _sparse_softmax_backward_data::overload_name)
5713 .typed<_sparse_softmax_backward_data::schema>();
5714}
5715
5716// aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
5717at::Tensor _sparse_softmax_backward_data::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
5718
5719 static auto op = create__sparse_softmax_backward_data_typed_handle();
5720 return op.call(grad_output, output, dim, self);
5721}
5722
5723// aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
5724at::Tensor _sparse_softmax_backward_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
5725
5726 static auto op = create__sparse_softmax_backward_data_typed_handle();
5727 return op.redispatch(dispatchKeySet, grad_output, output, dim, self);
5728}
5729
5730STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_int, name, "aten::_sparse_log_softmax")
5731STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_int, overload_name, "int")
5732STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_int, schema_str, "_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor")
5733
5734// aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
5735static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_int::schema> create__sparse_log_softmax_int_typed_handle() {
5736 return c10::Dispatcher::singleton()
5737 .findSchemaOrThrow(_sparse_log_softmax_int::name, _sparse_log_softmax_int::overload_name)
5738 .typed<_sparse_log_softmax_int::schema>();
5739}
5740
5741// aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
5742at::Tensor _sparse_log_softmax_int::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
5743
5744 static auto op = create__sparse_log_softmax_int_typed_handle();
5745 return op.call(self, dim, dtype);
5746}
5747
5748// aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
5749at::Tensor _sparse_log_softmax_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
5750
5751 static auto op = create__sparse_log_softmax_int_typed_handle();
5752 return op.redispatch(dispatchKeySet, self, dim, dtype);
5753}
5754
5755STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_Dimname, name, "aten::_sparse_log_softmax")
5756STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_Dimname, overload_name, "Dimname")
5757STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_Dimname, schema_str, "_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor")
5758
5759// aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
5760static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_Dimname::schema> create__sparse_log_softmax_Dimname_typed_handle() {
5761 return c10::Dispatcher::singleton()
5762 .findSchemaOrThrow(_sparse_log_softmax_Dimname::name, _sparse_log_softmax_Dimname::overload_name)
5763 .typed<_sparse_log_softmax_Dimname::schema>();
5764}
5765
5766// aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
5767at::Tensor _sparse_log_softmax_Dimname::call(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
5768
5769 static auto op = create__sparse_log_softmax_Dimname_typed_handle();
5770 return op.call(self, dim, dtype);
5771}
5772
5773// aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
5774at::Tensor _sparse_log_softmax_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
5775
5776 static auto op = create__sparse_log_softmax_Dimname_typed_handle();
5777 return op.redispatch(dispatchKeySet, self, dim, dtype);
5778}
5779
5780STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax, name, "aten::_sparse_log_softmax")
5781STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax, overload_name, "")
5782STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax, schema_str, "_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor")
5783
5784// aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
5785static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax::schema> create__sparse_log_softmax_typed_handle() {
5786 return c10::Dispatcher::singleton()
5787 .findSchemaOrThrow(_sparse_log_softmax::name, _sparse_log_softmax::overload_name)
5788 .typed<_sparse_log_softmax::schema>();
5789}
5790
5791// aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
5792at::Tensor _sparse_log_softmax::call(const at::Tensor & self, int64_t dim, bool half_to_float) {
5793
5794 static auto op = create__sparse_log_softmax_typed_handle();
5795 return op.call(self, dim, half_to_float);
5796}
5797
5798// aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
5799at::Tensor _sparse_log_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
5800
5801 static auto op = create__sparse_log_softmax_typed_handle();
5802 return op.redispatch(dispatchKeySet, self, dim, half_to_float);
5803}
5804
5805STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_backward_data, name, "aten::_sparse_log_softmax_backward_data")
5806STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_backward_data, overload_name, "")
5807STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_backward_data, schema_str, "_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor")
5808
5809// aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
5810static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_backward_data::schema> create__sparse_log_softmax_backward_data_typed_handle() {
5811 return c10::Dispatcher::singleton()
5812 .findSchemaOrThrow(_sparse_log_softmax_backward_data::name, _sparse_log_softmax_backward_data::overload_name)
5813 .typed<_sparse_log_softmax_backward_data::schema>();
5814}
5815
5816// aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
5817at::Tensor _sparse_log_softmax_backward_data::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
5818
5819 static auto op = create__sparse_log_softmax_backward_data_typed_handle();
5820 return op.call(grad_output, output, dim, self);
5821}
5822
5823// aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
5824at::Tensor _sparse_log_softmax_backward_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
5825
5826 static auto op = create__sparse_log_softmax_backward_data_typed_handle();
5827 return op.redispatch(dispatchKeySet, grad_output, output, dim, self);
5828}
5829
5830STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_spdiags, name, "aten::_spdiags")
5831STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_spdiags, overload_name, "")
5832STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_spdiags, schema_str, "_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor")
5833
5834// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
5835static C10_NOINLINE c10::TypedOperatorHandle<_spdiags::schema> create__spdiags_typed_handle() {
5836 return c10::Dispatcher::singleton()
5837 .findSchemaOrThrow(_spdiags::name, _spdiags::overload_name)
5838 .typed<_spdiags::schema>();
5839}
5840
5841// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
5842at::Tensor _spdiags::call(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
5843
5844 static auto op = create__spdiags_typed_handle();
5845 return op.call(diagonals, offsets, shape, layout);
5846}
5847
5848// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
5849at::Tensor _spdiags::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
5850
5851 static auto op = create__spdiags_typed_handle();
5852 return op.redispatch(dispatchKeySet, diagonals, offsets, shape, layout);
5853}
5854
5855STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frexp_Tensor, name, "aten::frexp")
5856STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frexp_Tensor, overload_name, "Tensor")
5857STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frexp_Tensor, schema_str, "frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)")
5858
5859// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
5860static C10_NOINLINE c10::TypedOperatorHandle<frexp_Tensor::schema> create_frexp_Tensor_typed_handle() {
5861 return c10::Dispatcher::singleton()
5862 .findSchemaOrThrow(frexp_Tensor::name, frexp_Tensor::overload_name)
5863 .typed<frexp_Tensor::schema>();
5864}
5865
5866// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
5867::std::tuple<at::Tensor,at::Tensor> frexp_Tensor::call(const at::Tensor & self) {
5868
5869 static auto op = create_frexp_Tensor_typed_handle();
5870 return op.call(self);
5871}
5872
5873// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
5874::std::tuple<at::Tensor,at::Tensor> frexp_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
5875
5876 static auto op = create_frexp_Tensor_typed_handle();
5877 return op.redispatch(dispatchKeySet, self);
5878}
5879
5880STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frexp_Tensor_out, name, "aten::frexp")
5881STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frexp_Tensor_out, overload_name, "Tensor_out")
5882STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frexp_Tensor_out, schema_str, "frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)")
5883
5884// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
5885static C10_NOINLINE c10::TypedOperatorHandle<frexp_Tensor_out::schema> create_frexp_Tensor_out_typed_handle() {
5886 return c10::Dispatcher::singleton()
5887 .findSchemaOrThrow(frexp_Tensor_out::name, frexp_Tensor_out::overload_name)
5888 .typed<frexp_Tensor_out::schema>();
5889}
5890
5891// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
5892::std::tuple<at::Tensor &,at::Tensor &> frexp_Tensor_out::call(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
5893
5894 static auto op = create_frexp_Tensor_out_typed_handle();
5895 return op.call(self, mantissa, exponent);
5896}
5897
5898// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
5899::std::tuple<at::Tensor &,at::Tensor &> frexp_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
5900
5901 static auto op = create_frexp_Tensor_out_typed_handle();
5902 return op.redispatch(dispatchKeySet, self, mantissa, exponent);
5903}
5904
5905STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero_, name, "aten::zero_")
5906STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero_, overload_name, "")
5907STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero_, schema_str, "zero_(Tensor(a!) self) -> Tensor(a!)")
5908
5909// aten::zero_(Tensor(a!) self) -> Tensor(a!)
5910static C10_NOINLINE c10::TypedOperatorHandle<zero_::schema> create_zero__typed_handle() {
5911 return c10::Dispatcher::singleton()
5912 .findSchemaOrThrow(zero_::name, zero_::overload_name)
5913 .typed<zero_::schema>();
5914}
5915
5916// aten::zero_(Tensor(a!) self) -> Tensor(a!)
5917at::Tensor & zero_::call(at::Tensor & self) {
5918
5919 static auto op = create_zero__typed_handle();
5920 return op.call(self);
5921}
5922
5923// aten::zero_(Tensor(a!) self) -> Tensor(a!)
5924at::Tensor & zero_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
5925
5926 static auto op = create_zero__typed_handle();
5927 return op.redispatch(dispatchKeySet, self);
5928}
5929
5930STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Tensor, name, "aten::rsub")
5931STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Tensor, overload_name, "Tensor")
5932STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Tensor, schema_str, "rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor")
5933
5934// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
5935static C10_NOINLINE c10::TypedOperatorHandle<rsub_Tensor::schema> create_rsub_Tensor_typed_handle() {
5936 return c10::Dispatcher::singleton()
5937 .findSchemaOrThrow(rsub_Tensor::name, rsub_Tensor::overload_name)
5938 .typed<rsub_Tensor::schema>();
5939}
5940
5941// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
5942at::Tensor rsub_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
5943
5944 static auto op = create_rsub_Tensor_typed_handle();
5945 return op.call(self, other, alpha);
5946}
5947
5948// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
5949at::Tensor rsub_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
5950
5951 static auto op = create_rsub_Tensor_typed_handle();
5952 return op.redispatch(dispatchKeySet, self, other, alpha);
5953}
5954
5955STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Scalar, name, "aten::rsub")
5956STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Scalar, overload_name, "Scalar")
5957STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Scalar, schema_str, "rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor")
5958
5959// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
5960static C10_NOINLINE c10::TypedOperatorHandle<rsub_Scalar::schema> create_rsub_Scalar_typed_handle() {
5961 return c10::Dispatcher::singleton()
5962 .findSchemaOrThrow(rsub_Scalar::name, rsub_Scalar::overload_name)
5963 .typed<rsub_Scalar::schema>();
5964}
5965
5966// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
5967at::Tensor rsub_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
5968
5969 static auto op = create_rsub_Scalar_typed_handle();
5970 return op.call(self, other, alpha);
5971}
5972
5973// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
5974at::Tensor rsub_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
5975
5976 static auto op = create_rsub_Scalar_typed_handle();
5977 return op.redispatch(dispatchKeySet, self, other, alpha);
5978}
5979
5980STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_mm_reduce_impl, name, "aten::_sparse_mm_reduce_impl")
5981STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_mm_reduce_impl, overload_name, "")
5982STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_mm_reduce_impl, schema_str, "_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)")
5983
5984// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
5985static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mm_reduce_impl::schema> create__sparse_mm_reduce_impl_typed_handle() {
5986 return c10::Dispatcher::singleton()
5987 .findSchemaOrThrow(_sparse_mm_reduce_impl::name, _sparse_mm_reduce_impl::overload_name)
5988 .typed<_sparse_mm_reduce_impl::schema>();
5989}
5990
5991// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
5992::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl::call(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
5993
5994 static auto op = create__sparse_mm_reduce_impl_typed_handle();
5995 return op.call(self, other, reduce);
5996}
5997
5998// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
5999::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
6000
6001 static auto op = create__sparse_mm_reduce_impl_typed_handle();
6002 return op.redispatch(dispatchKeySet, self, other, reduce);
6003}
6004
6005STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_bsr_tensor_unsafe, name, "aten::_sparse_bsr_tensor_unsafe")
6006STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_bsr_tensor_unsafe, overload_name, "")
6007STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_bsr_tensor_unsafe, schema_str, "_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
6008
6009// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6010static C10_NOINLINE c10::TypedOperatorHandle<_sparse_bsr_tensor_unsafe::schema> create__sparse_bsr_tensor_unsafe_typed_handle() {
6011 return c10::Dispatcher::singleton()
6012 .findSchemaOrThrow(_sparse_bsr_tensor_unsafe::name, _sparse_bsr_tensor_unsafe::overload_name)
6013 .typed<_sparse_bsr_tensor_unsafe::schema>();
6014}
6015
6016// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6017at::Tensor _sparse_bsr_tensor_unsafe::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6018
6019 static auto op = create__sparse_bsr_tensor_unsafe_typed_handle();
6020 return op.call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
6021}
6022
6023// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6024at::Tensor _sparse_bsr_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6025
6026 static auto op = create__sparse_bsr_tensor_unsafe_typed_handle();
6027 return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
6028}
6029
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_csc_tensor_args, name, "aten::_validate_sparse_csc_tensor_args")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_csc_tensor_args, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_sparse_csc_tensor_args, schema_str, "_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()")

// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_csc_tensor_args::schema> create__validate_sparse_csc_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_csc_tensor_args::name, _validate_sparse_csc_tensor_args::overload_name)
      .typed<_validate_sparse_csc_tensor_args::schema>();
}

// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
void _validate_sparse_csc_tensor_args::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_csc_tensor_args_typed_handle();
    return op.call(ccol_indices, row_indices, values, size);
}

// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
void _validate_sparse_csc_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_csc_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_with_dims, name, "aten::_sparse_coo_tensor_with_dims")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_with_dims, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_with_dims, schema_str, "_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_with_dims::schema> create__sparse_coo_tensor_with_dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_coo_tensor_with_dims::name, _sparse_coo_tensor_with_dims::overload_name)
      .typed<_sparse_coo_tensor_with_dims::schema>();
}

// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _sparse_coo_tensor_with_dims::call(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    
    static auto op = create__sparse_coo_tensor_with_dims_typed_handle();
    return op.call(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _sparse_coo_tensor_with_dims::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    
    static auto op = create__sparse_coo_tensor_with_dims_typed_handle();
    return op.redispatch(dispatchKeySet, sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dense_backward, name, "aten::to_dense_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dense_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dense_backward, schema_str, "to_dense_backward(Tensor grad, Tensor input) -> Tensor")

// aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_dense_backward::schema> create_to_dense_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_dense_backward::name, to_dense_backward::overload_name)
      .typed<to_dense_backward::schema>();
}

// aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor
at::Tensor to_dense_backward::call(const at::Tensor & grad, const at::Tensor & input) {
    
    static auto op = create_to_dense_backward_typed_handle();
    return op.call(grad, input);
}

// aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor
at::Tensor to_dense_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input) {
    
    static auto op = create_to_dense_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_coalesce, name, "aten::_coalesce")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_coalesce, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_coalesce, schema_str, "_coalesce(Tensor self) -> Tensor")

// aten::_coalesce(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_coalesce::schema> create__coalesce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_coalesce::name, _coalesce::overload_name)
      .typed<_coalesce::schema>();
}

// aten::_coalesce(Tensor self) -> Tensor
at::Tensor _coalesce::call(const at::Tensor & self) {
    
    static auto op = create__coalesce_typed_handle();
    return op.call(self);
}

// aten::_coalesce(Tensor self) -> Tensor
at::Tensor _coalesce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__coalesce_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_values, name, "aten::_values")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_values, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_values, schema_str, "_values(Tensor(a) self) -> Tensor(a)")

// aten::_values(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_values::schema> create__values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_values::name, _values::overload_name)
      .typed<_values::schema>();
}

// aten::_values(Tensor(a) self) -> Tensor(a)
at::Tensor _values::call(const at::Tensor & self) {
    
    static auto op = create__values_typed_handle();
    return op.call(self);
}

// aten::_values(Tensor(a) self) -> Tensor(a)
at::Tensor _values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__values_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices, name, "aten::crow_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(crow_indices, schema_str, "crow_indices(Tensor(a) self) -> Tensor(a)")

// aten::crow_indices(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<crow_indices::schema> create_crow_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(crow_indices::name, crow_indices::overload_name)
      .typed<crow_indices::schema>();
}

// aten::crow_indices(Tensor(a) self) -> Tensor(a)
at::Tensor crow_indices::call(const at::Tensor & self) {
    
    static auto op = create_crow_indices_typed_handle();
    return op.call(self);
}

// aten::crow_indices(Tensor(a) self) -> Tensor(a)
at::Tensor crow_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_crow_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

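// crow_indices() exposes the compressed row pointers of a sparse CSR tensor,
// aliasing (Tensor(a)) rather than copying. A hedged usage sketch, assuming a
// CSR-layout tensor obtained via to_sparse_csr():
//
//   at::Tensor dense = at::eye(3);
//   at::Tensor csr = dense.to_sparse_csr();
//   at::Tensor crow = csr.crow_indices();  // integer tensor, length nrows + 1
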
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_zero_point, name, "aten::q_zero_point")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_zero_point, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_zero_point, schema_str, "q_zero_point(Tensor self) -> int")

// aten::q_zero_point(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<q_zero_point::schema> create_q_zero_point_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_zero_point::name, q_zero_point::overload_name)
      .typed<q_zero_point::schema>();
}

// aten::q_zero_point(Tensor self) -> int
int64_t q_zero_point::call(const at::Tensor & self) {
    
    static auto op = create_q_zero_point_typed_handle();
    return op.call(self);
}

// aten::q_zero_point(Tensor self) -> int
int64_t q_zero_point::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_q_zero_point_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

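// q_zero_point() is only meaningful on per-tensor-affine quantized tensors.
// A short sketch, assuming the usual quantize_per_tensor entry point:
//
//   at::Tensor x = at::rand({4});
//   at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/10,
//                                          at::kQUInt8);
//   int64_t zp = q.q_zero_point();  // 10
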
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_scales, name, "aten::q_per_channel_scales")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_scales, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_scales, schema_str, "q_per_channel_scales(Tensor self) -> Tensor")

// aten::q_per_channel_scales(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_scales::schema> create_q_per_channel_scales_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_scales::name, q_per_channel_scales::overload_name)
      .typed<q_per_channel_scales::schema>();
}

// aten::q_per_channel_scales(Tensor self) -> Tensor
at::Tensor q_per_channel_scales::call(const at::Tensor & self) {
    
    static auto op = create_q_per_channel_scales_typed_handle();
    return op.call(self);
}

// aten::q_per_channel_scales(Tensor self) -> Tensor
at::Tensor q_per_channel_scales::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_q_per_channel_scales_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_tensor_affine_backward, name, "aten::_fake_quantize_learnable_per_tensor_affine_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_tensor_affine_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_tensor_affine_backward, schema_str, "_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)")

// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_tensor_affine_backward::schema> create__fake_quantize_learnable_per_tensor_affine_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fake_quantize_learnable_per_tensor_affine_backward::name, _fake_quantize_learnable_per_tensor_affine_backward::overload_name)
      .typed<_fake_quantize_learnable_per_tensor_affine_backward::schema>();
}

// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward::call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
    
    static auto op = create__fake_quantize_learnable_per_tensor_affine_backward_typed_handle();
    return op.call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
    
    static auto op = create__fake_quantize_learnable_per_tensor_affine_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine_backward, name, "aten::_fake_quantize_learnable_per_channel_affine_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine_backward, schema_str, "_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)")

// aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_channel_affine_backward::schema> create__fake_quantize_learnable_per_channel_affine_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fake_quantize_learnable_per_channel_affine_backward::name, _fake_quantize_learnable_per_channel_affine_backward::overload_name)
      .typed<_fake_quantize_learnable_per_channel_affine_backward::schema>();
}

// aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward::call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    
    static auto op = create__fake_quantize_learnable_per_channel_affine_backward_typed_handle();
    return op.call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    
    static auto op = create__fake_quantize_learnable_per_channel_affine_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fused_moving_avg_obs_fake_quant, name, "aten::fused_moving_avg_obs_fake_quant")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fused_moving_avg_obs_fake_quant, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fused_moving_avg_obs_fake_quant, schema_str, "fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor")

// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fused_moving_avg_obs_fake_quant::schema> create_fused_moving_avg_obs_fake_quant_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fused_moving_avg_obs_fake_quant::name, fused_moving_avg_obs_fake_quant::overload_name)
      .typed<fused_moving_avg_obs_fake_quant::schema>();
}

// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
at::Tensor fused_moving_avg_obs_fake_quant::call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    
    static auto op = create_fused_moving_avg_obs_fake_quant_typed_handle();
    return op.call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
at::Tensor fused_moving_avg_obs_fake_quant::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    
    static auto op = create_fused_moving_avg_obs_fake_quant_typed_handle();
    return op.redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_choose_qparams_per_tensor, name, "aten::_choose_qparams_per_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_choose_qparams_per_tensor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_choose_qparams_per_tensor, schema_str, "_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)")

// aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
static C10_NOINLINE c10::TypedOperatorHandle<_choose_qparams_per_tensor::schema> create__choose_qparams_per_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_choose_qparams_per_tensor::name, _choose_qparams_per_tensor::overload_name)
      .typed<_choose_qparams_per_tensor::schema>();
}

// aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
::std::tuple<double,int64_t> _choose_qparams_per_tensor::call(const at::Tensor & self, bool reduce_range) {
    
    static auto op = create__choose_qparams_per_tensor_typed_handle();
    return op.call(self, reduce_range);
}

// aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
::std::tuple<double,int64_t> _choose_qparams_per_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool reduce_range) {
    
    static auto op = create__choose_qparams_per_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, reduce_range);
}

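// _choose_qparams_per_tensor is an internal helper that proposes a (scale,
// zero_point) pair for per-tensor quantization. A hypothetical call, shown
// only to illustrate the (float, int) tuple return convention used above:
//
//   at::Tensor x = at::rand({8});
//   double scale; int64_t zero_point;
//   std::tie(scale, zero_point) =
//       at::_choose_qparams_per_tensor(x, /*reduce_range=*/false);
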
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(meshgrid, name, "aten::meshgrid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(meshgrid, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(meshgrid, schema_str, "meshgrid(Tensor[] tensors) -> Tensor[]")

// aten::meshgrid(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<meshgrid::schema> create_meshgrid_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(meshgrid::name, meshgrid::overload_name)
      .typed<meshgrid::schema>();
}

// aten::meshgrid(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> meshgrid::call(at::TensorList tensors) {
    
    static auto op = create_meshgrid_typed_handle();
    return op.call(tensors);
}

// aten::meshgrid(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> meshgrid::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    
    static auto op = create_meshgrid_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(meshgrid_indexing, name, "aten::meshgrid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(meshgrid_indexing, overload_name, "indexing")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(meshgrid_indexing, schema_str, "meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]")

// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<meshgrid_indexing::schema> create_meshgrid_indexing_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(meshgrid_indexing::name, meshgrid_indexing::overload_name)
      .typed<meshgrid_indexing::schema>();
}

// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
::std::vector<at::Tensor> meshgrid_indexing::call(at::TensorList tensors, c10::string_view indexing) {
    
    static auto op = create_meshgrid_indexing_typed_handle();
    return op.call(tensors, indexing);
}

// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
::std::vector<at::Tensor> meshgrid_indexing::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, c10::string_view indexing) {
    
    static auto op = create_meshgrid_indexing_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, indexing);
}

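// The "indexing" overload mirrors Python's torch.meshgrid(..., indexing=...):
// "ij" gives matrix indexing, "xy" Cartesian indexing. A minimal sketch:
//
//   at::Tensor x = at::arange(3);
//   at::Tensor y = at::arange(2);
//   std::vector<at::Tensor> grids = at::meshgrid({x, y}, "ij");
//   // grids[0].sizes() == {3, 2} and grids[1].sizes() == {3, 2}
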
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(can_cast, name, "aten::can_cast")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(can_cast, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(can_cast, schema_str, "can_cast(ScalarType from, ScalarType to) -> bool")

// aten::can_cast(ScalarType from, ScalarType to) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<can_cast::schema> create_can_cast_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(can_cast::name, can_cast::overload_name)
      .typed<can_cast::schema>();
}

// aten::can_cast(ScalarType from, ScalarType to) -> bool
bool can_cast::call(at::ScalarType from, at::ScalarType to) {
    
    static auto op = create_can_cast_typed_handle();
    return op.call(from, to);
}

// aten::can_cast(ScalarType from, ScalarType to) -> bool
bool can_cast::redispatch(c10::DispatchKeySet dispatchKeySet, at::ScalarType from, at::ScalarType to) {
    
    static auto op = create_can_cast_typed_handle();
    return op.redispatch(dispatchKeySet, from, to);
}

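// can_cast implements the torch.can_cast promotion rule: a cast is allowed
// only if it does not lose "kind" (e.g. float -> int is rejected). Sketch:
//
//   bool ok = at::can_cast(at::kInt, at::kFloat);    // true
//   bool bad = at::can_cast(at::kFloat, at::kInt);   // false
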
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_mps_backward, name, "aten::lstm_mps_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_mps_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_mps_backward, schema_str, "lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])")

// aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
static C10_NOINLINE c10::TypedOperatorHandle<lstm_mps_backward::schema> create_lstm_mps_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lstm_mps_backward::name, lstm_mps_backward::overload_name)
      .typed<lstm_mps_backward::schema>();
}

// aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward::call(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    
    static auto op = create_lstm_mps_backward_typed_handle();
    return op.call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    
    static auto op = create_lstm_mps_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward_impl, name, "aten::_thnn_fused_lstm_cell_backward_impl")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward_impl, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward_impl, schema_str, "_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)")

// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell_backward_impl::schema> create__thnn_fused_lstm_cell_backward_impl_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_thnn_fused_lstm_cell_backward_impl::name, _thnn_fused_lstm_cell_backward_impl::overload_name)
      .typed<_thnn_fused_lstm_cell_backward_impl::schema>();
}

// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl::call(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    
    static auto op = create__thnn_fused_lstm_cell_backward_impl_typed_handle();
    return op.call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}

// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl::redispatch(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    
    static auto op = create__thnn_fused_lstm_cell_backward_impl_typed_handle();
    return op.redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_gru_cell, name, "aten::_thnn_fused_gru_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_gru_cell, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_gru_cell, schema_str, "_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)")

// aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_gru_cell::schema> create__thnn_fused_gru_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_thnn_fused_gru_cell::name, _thnn_fused_gru_cell::overload_name)
      .typed<_thnn_fused_gru_cell::schema>();
}

// aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell::call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
    
    static auto op = create__thnn_fused_gru_cell_typed_handle();
    return op.call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
}

// aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
    
    static auto op = create__thnn_fused_gru_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_rnn_tanh_cell, name, "aten::quantized_rnn_tanh_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_rnn_tanh_cell, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_rnn_tanh_cell, schema_str, "quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor")

// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_rnn_tanh_cell::schema> create_quantized_rnn_tanh_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_rnn_tanh_cell::name, quantized_rnn_tanh_cell::overload_name)
      .typed<quantized_rnn_tanh_cell::schema>();
}

// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_rnn_tanh_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    
    static auto op = create_quantized_rnn_tanh_cell_typed_handle();
    return op.call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_rnn_tanh_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    
    static auto op = create_quantized_rnn_tanh_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence, name, "aten::_pack_padded_sequence")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence, schema_str, "_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)")

// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_pack_padded_sequence::schema> create__pack_padded_sequence_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pack_padded_sequence::name, _pack_padded_sequence::overload_name)
      .typed<_pack_padded_sequence::schema>();
}

// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence::call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
    
    static auto op = create__pack_padded_sequence_typed_handle();
    return op.call(input, lengths, batch_first);
}

// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
    
    static auto op = create__pack_padded_sequence_typed_handle();
    return op.redispatch(dispatchKeySet, input, lengths, batch_first);
}

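// _pack_padded_sequence is the internal kernel behind
// torch.nn.utils.rnn.pack_padded_sequence. A hedged sketch of the raw call,
// given suitable `input` and `lengths` tensors (lengths is typically a 1-D
// CPU int64 tensor sorted in decreasing order):
//
//   at::Tensor data, batch_sizes;
//   std::tie(data, batch_sizes) =
//       at::_pack_padded_sequence(input, lengths, /*batch_first=*/true);
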
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_set_to, name, "aten::is_set_to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_set_to, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_set_to, schema_str, "is_set_to(Tensor self, Tensor tensor) -> bool")

// aten::is_set_to(Tensor self, Tensor tensor) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_set_to::schema> create_is_set_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_set_to::name, is_set_to::overload_name)
      .typed<is_set_to::schema>();
}

// aten::is_set_to(Tensor self, Tensor tensor) -> bool
bool is_set_to::call(const at::Tensor & self, const at::Tensor & tensor) {
    
    static auto op = create_is_set_to_typed_handle();
    return op.call(self, tensor);
}

// aten::is_set_to(Tensor self, Tensor tensor) -> bool
bool is_set_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor) {
    
    static auto op = create_is_set_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor);
}

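// is_set_to reports whether two tensors share the exact same storage, offset,
// sizes, and strides (i.e. one was set_() from the other). Sketch:
//
//   at::Tensor a = at::empty({2, 2});
//   at::Tensor b = at::empty({0});
//   b.set_(a);
//   bool same = b.is_set_to(a);  // true
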
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax, name, "aten::_masked_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax, schema_str, "_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor")

// aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_masked_softmax::schema> create__masked_softmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_masked_softmax::name, _masked_softmax::overload_name)
      .typed<_masked_softmax::schema>();
}

// aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
at::Tensor _masked_softmax::call(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
    
    static auto op = create__masked_softmax_typed_handle();
    return op.call(self, mask, dim, mask_type);
}

// aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
at::Tensor _masked_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
    
    static auto op = create__masked_softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, dim, mask_type);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view, name, "aten::view")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view, schema_str, "view(Tensor(a) self, SymInt[] size) -> Tensor(a)")

// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<view::schema> create_view_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view::name, view::overload_name)
      .typed<view::schema>();
}

// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
at::Tensor view::call(const at::Tensor & self, c10::SymIntArrayRef size) {
    
    static auto op = create_view_typed_handle();
    return op.call(self, size);
}

// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
at::Tensor view::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
    
    static auto op = create_view_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

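// view reshapes without copying; the SymInt[] size argument lets symbolic
// shapes flow through during tracing. Sketch:
//
//   at::Tensor t = at::arange(6);
//   at::Tensor m = t.view({2, 3});   // shares storage with t
//   at::Tensor f = m.view({-1});     // one dimension may be inferred as -1
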
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_dtype, name, "aten::view")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_dtype, overload_name, "dtype")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_dtype, schema_str, "view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)")

// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<view_dtype::schema> create_view_dtype_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_dtype::name, view_dtype::overload_name)
      .typed<view_dtype::schema>();
}

// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
at::Tensor view_dtype::call(const at::Tensor & self, at::ScalarType dtype) {
    
    static auto op = create_view_dtype_typed_handle();
    return op.call(self, dtype);
}

// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
at::Tensor view_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {
    
    static auto op = create_view_dtype_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

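// view.dtype reinterprets the raw bits rather than converting values, so the
// source and target dtypes must have compatible element sizes. Sketch:
//
//   at::Tensor f = at::zeros({4}, at::kFloat);
//   at::Tensor i = f.view(at::kInt);  // same 4 bytes per element, no copy
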
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__xor___Scalar, name, "aten::__xor__")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__xor___Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__xor___Scalar, schema_str, "__xor__.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__xor___Scalar::schema> create___xor___Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__xor___Scalar::name, __xor___Scalar::overload_name)
      .typed<__xor___Scalar::schema>();
}

// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __xor___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___xor___Scalar_typed_handle();
    return op.call(self, other);
}

// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __xor___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___xor___Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__xor___Tensor, name, "aten::__xor__")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__xor___Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__xor___Tensor, schema_str, "__xor__.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__xor___Tensor::schema> create___xor___Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__xor___Tensor::name, __xor___Tensor::overload_name)
      .typed<__xor___Tensor::schema>();
}

// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __xor___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___xor___Tensor_typed_handle();
    return op.call(self, other);
}

// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __xor___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___xor___Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

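// __xor__ backs elementwise xor (bitwise for integral dtypes, logical for
// bool). A hedged sketch using the generated Tensor methods:
//
//   at::Tensor a = at::arange(4);      // [0, 1, 2, 3], int64
//   at::Tensor r = a.__xor__(a);       // Tensor overload -> all zeros
//   at::Tensor s = a.__xor__(1);       // Scalar overload
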
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ixor___Scalar, name, "aten::__ixor__")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ixor___Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ixor___Scalar, schema_str, "__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__ixor___Scalar::schema> create___ixor___Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__ixor___Scalar::name, __ixor___Scalar::overload_name)
      .typed<__ixor___Scalar::schema>();
}

// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __ixor___Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___ixor___Scalar_typed_handle();
    return op.call(self, other);
}

// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __ixor___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___ixor___Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ixor___Tensor, name, "aten::__ixor__")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ixor___Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(__ixor___Tensor, schema_str, "__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__ixor___Tensor::schema> create___ixor___Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__ixor___Tensor::name, __ixor___Tensor::overload_name)
      .typed<__ixor___Tensor::schema>();
}

// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __ixor___Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___ixor___Tensor_typed_handle();
    return op.call(self, other);
}

// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __ixor___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___ixor___Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

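// __ixor__ is the in-place variant; the Tensor(a!) annotation in the schema
// means self is mutated and returned by reference. Sketch using the generated
// Tensor methods:
//
//   at::Tensor a = at::arange(4);
//   a.__ixor__(1);               // Scalar overload, mutates a in place
//   a.__ixor__(at::arange(4));   // Tensor overload
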
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_, name, "aten::triu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_, schema_str, "triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)")

// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<triu_::schema> create_triu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu_::name, triu_::overload_name)
      .typed<triu_::schema>();
}

// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
at::Tensor & triu_::call(at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_triu__typed_handle();
    return op.call(self, diagonal);
}

// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
at::Tensor & triu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_triu__typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp__Scalar, name, "aten::lerp_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp__Scalar, schema_str, "lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)")

// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lerp__Scalar::schema> create_lerp__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp__Scalar::name, lerp__Scalar::overload_name)
      .typed<lerp__Scalar::schema>();
}

// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
at::Tensor & lerp__Scalar::call(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
    
    static auto op = create_lerp__Scalar_typed_handle();
    return op.call(self, end, weight);
}

// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
at::Tensor & lerp__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
    
    static auto op = create_lerp__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp__Tensor, name, "aten::lerp_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp__Tensor, schema_str, "lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)")

// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lerp__Tensor::schema> create_lerp__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp__Tensor::name, lerp__Tensor::overload_name)
      .typed<lerp__Tensor::schema>();
}

// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
at::Tensor & lerp__Tensor::call(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
    
    static auto op = create_lerp__Tensor_typed_handle();
    return op.call(self, end, weight);
}

// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
at::Tensor & lerp__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
    
    static auto op = create_lerp__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight);
}

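// lerp_ linearly interpolates self toward end in place:
// self = self + weight * (end - self). Sketch:
//
//   at::Tensor start = at::zeros({3});
//   at::Tensor stop = at::ones({3});
//   start.lerp_(stop, 0.5);            // Scalar weight -> [0.5, 0.5, 0.5]
//   start.lerp_(stop, at::rand({3}));  // Tensor weight, elementwise
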
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm_, name, "aten::addbmm_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm_, schema_str, "addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)")

// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addbmm_::schema> create_addbmm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addbmm_::name, addbmm_::overload_name)
      .typed<addbmm_::schema>();
}

// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addbmm_::call(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addbmm__typed_handle();
    return op.call(self, batch1, batch2, beta, alpha);
}

// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addbmm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addbmm__typed_handle();
    return op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm_out, name, "aten::addbmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm_out, schema_str, "addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addbmm_out::schema> create_addbmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addbmm_out::name, addbmm_out::overload_name)
      .typed<addbmm_out::schema>();
}

// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addbmm_out::call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addbmm_out_typed_handle();
    return op.call(self, batch1, batch2, beta, alpha, out);
}

// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addbmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addbmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm, name, "aten::addbmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addbmm, schema_str, "addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")

// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<addbmm::schema> create_addbmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addbmm::name, addbmm::overload_name)
      .typed<addbmm::schema>();
}

// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addbmm::call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addbmm_typed_handle();
    return op.call(self, batch1, batch2, beta, alpha);
}

// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addbmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addbmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
}

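// addbmm computes beta * self + alpha * sum_i(batch1[i] @ batch2[i]): the
// batched matmuls are reduced over the batch dimension (unlike baddbmm).
// Sketch:
//
//   at::Tensor acc = at::zeros({3, 5});
//   at::Tensor b1 = at::rand({10, 3, 4});
//   at::Tensor b2 = at::rand({10, 4, 5});
//   at::Tensor out = at::addbmm(acc, b1, b2);        // functional, 3x5
//   acc.addbmm_(b1, b2, /*beta=*/1, /*alpha=*/1);    // in-place variant
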
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_out, name, "aten::triu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu_out, schema_str, "triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<triu_out::schema> create_triu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu_out::name, triu_out::overload_name)
      .typed<triu_out::schema>();
}

// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & triu_out::call(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    
    static auto op = create_triu_out_typed_handle();
    return op.call(self, diagonal, out);
}

// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & triu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    
    static auto op = create_triu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu, name, "aten::triu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triu, schema_str, "triu(Tensor self, int diagonal=0) -> Tensor")

// aten::triu(Tensor self, int diagonal=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<triu::schema> create_triu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu::name, triu::overload_name)
      .typed<triu::schema>();
}

// aten::triu(Tensor self, int diagonal=0) -> Tensor
at::Tensor triu::call(const at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_triu_typed_handle();
    return op.call(self, diagonal);
}

// aten::triu(Tensor self, int diagonal=0) -> Tensor
at::Tensor triu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_triu_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal);
}

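// triu keeps the upper triangle at and above the given diagonal and zeroes
// the rest; diagonal > 0 moves the kept band above the main diagonal. Sketch:
//
//   at::Tensor m = at::ones({3, 3});
//   at::Tensor u0 = at::triu(m);      // main diagonal and above
//   at::Tensor u1 = at::triu(m, 1);   // strictly above the main diagonal
//   m.triu_(-1);                      // in-place, also keeps one subdiagonal
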
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Scalar_out, name, "aten::not_equal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Scalar_out, schema_str, "not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<not_equal_Scalar_out::schema> create_not_equal_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(not_equal_Scalar_out::name, not_equal_Scalar_out::overload_name)
      .typed<not_equal_Scalar_out::schema>();
}

// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & not_equal_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_not_equal_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & not_equal_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_not_equal_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Scalar, name, "aten::not_equal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Scalar, schema_str, "not_equal.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<not_equal_Scalar::schema> create_not_equal_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(not_equal_Scalar::name, not_equal_Scalar::overload_name)
      .typed<not_equal_Scalar::schema>();
}

// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor not_equal_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_not_equal_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor not_equal_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_not_equal_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

6980STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Tensor_out, name, "aten::not_equal")
6981STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Tensor_out, overload_name, "Tensor_out")
6982STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Tensor_out, schema_str, "not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
6983
6984// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
6985static C10_NOINLINE c10::TypedOperatorHandle<not_equal_Tensor_out::schema> create_not_equal_Tensor_out_typed_handle() {
6986 return c10::Dispatcher::singleton()
6987 .findSchemaOrThrow(not_equal_Tensor_out::name, not_equal_Tensor_out::overload_name)
6988 .typed<not_equal_Tensor_out::schema>();
6989}
6990
6991// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
6992at::Tensor & not_equal_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
6993
6994 static auto op = create_not_equal_Tensor_out_typed_handle();
6995 return op.call(self, other, out);
6996}
6997
6998// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
6999at::Tensor & not_equal_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
7000
7001 static auto op = create_not_equal_Tensor_out_typed_handle();
7002 return op.redispatch(dispatchKeySet, self, other, out);
7003}
7004
7005STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Tensor, name, "aten::not_equal")
7006STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Tensor, overload_name, "Tensor")
7007STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal_Tensor, schema_str, "not_equal.Tensor(Tensor self, Tensor other) -> Tensor")
7008
7009// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
7010static C10_NOINLINE c10::TypedOperatorHandle<not_equal_Tensor::schema> create_not_equal_Tensor_typed_handle() {
7011 return c10::Dispatcher::singleton()
7012 .findSchemaOrThrow(not_equal_Tensor::name, not_equal_Tensor::overload_name)
7013 .typed<not_equal_Tensor::schema>();
7014}
7015
7016// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
7017at::Tensor not_equal_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
7018
7019 static auto op = create_not_equal_Tensor_typed_handle();
7020 return op.call(self, other);
7021}
7022
7023// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
7024at::Tensor not_equal_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
7025
7026 static auto op = create_not_equal_Tensor_typed_handle();
7027 return op.redispatch(dispatchKeySet, self, other);
7028}
7029
7030STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal__Scalar, name, "aten::not_equal_")
7031STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal__Scalar, overload_name, "Scalar")
7032STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal__Scalar, schema_str, "not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
7033
7034// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
7035static C10_NOINLINE c10::TypedOperatorHandle<not_equal__Scalar::schema> create_not_equal__Scalar_typed_handle() {
7036 return c10::Dispatcher::singleton()
7037 .findSchemaOrThrow(not_equal__Scalar::name, not_equal__Scalar::overload_name)
7038 .typed<not_equal__Scalar::schema>();
7039}
7040
7041// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
7042at::Tensor & not_equal__Scalar::call(at::Tensor & self, const at::Scalar & other) {
7043
7044 static auto op = create_not_equal__Scalar_typed_handle();
7045 return op.call(self, other);
7046}
7047
7048// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
7049at::Tensor & not_equal__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
7050
7051 static auto op = create_not_equal__Scalar_typed_handle();
7052 return op.redispatch(dispatchKeySet, self, other);
7053}
7054
7055STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal__Tensor, name, "aten::not_equal_")
7056STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal__Tensor, overload_name, "Tensor")
7057STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(not_equal__Tensor, schema_str, "not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
7058
7059// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
7060static C10_NOINLINE c10::TypedOperatorHandle<not_equal__Tensor::schema> create_not_equal__Tensor_typed_handle() {
7061 return c10::Dispatcher::singleton()
7062 .findSchemaOrThrow(not_equal__Tensor::name, not_equal__Tensor::overload_name)
7063 .typed<not_equal__Tensor::schema>();
7064}
7065
7066// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
7067at::Tensor & not_equal__Tensor::call(at::Tensor & self, const at::Tensor & other) {
7068
7069 static auto op = create_not_equal__Tensor_typed_handle();
7070 return op.call(self, other);
7071}
7072
7073// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
7074at::Tensor & not_equal__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
7075
7076 static auto op = create_not_equal__Tensor_typed_handle();
7077 return op.redispatch(dispatchKeySet, self, other);
7078}
7079
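// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// not_equal is the alias of aten::ne and follows the standard three-variant
// pattern above: functional (returns a new bool tensor), .out (writes into a
// caller-provided tensor), and in-place not_equal_ (overwrites self).
//
//   at::Tensor a    = at::arange(3);            // [0, 1, 2]
//   at::Tensor mask = at::not_equal(a, 1);      // [true, false, true], kBool
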
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Scalar_out, name, "aten::greater")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Scalar_out, schema_str, "greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_Scalar_out::schema> create_greater_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_Scalar_out::name, greater_Scalar_out::overload_name)
      .typed<greater_Scalar_out::schema>();
}

// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_greater_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_greater_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Scalar, name, "aten::greater")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Scalar, schema_str, "greater.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<greater_Scalar::schema> create_greater_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_Scalar::name, greater_Scalar::overload_name)
      .typed<greater_Scalar::schema>();
}

// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor greater_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_greater_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor greater_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_greater_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Tensor_out, name, "aten::greater")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Tensor_out, schema_str, "greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_Tensor_out::schema> create_greater_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_Tensor_out::name, greater_Tensor_out::overload_name)
      .typed<greater_Tensor_out::schema>();
}

// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_greater_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_greater_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Tensor, name, "aten::greater")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater_Tensor, schema_str, "greater.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<greater_Tensor::schema> create_greater_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_Tensor::name, greater_Tensor::overload_name)
      .typed<greater_Tensor::schema>();
}

// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor greater_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_greater_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor greater_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_greater_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater__Scalar, name, "aten::greater_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater__Scalar, schema_str, "greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater__Scalar::schema> create_greater__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater__Scalar::name, greater__Scalar::overload_name)
      .typed<greater__Scalar::schema>();
}

// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & greater__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_greater__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & greater__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_greater__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater__Tensor, name, "aten::greater_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(greater__Tensor, schema_str, "greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater__Tensor::schema> create_greater__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater__Tensor::name, greater__Tensor::overload_name)
      .typed<greater__Tensor::schema>();
}

// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & greater__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_greater__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & greater__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_greater__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

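// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// greater is the alias of aten::gt (strict elementwise comparison) and mirrors
// the not_equal wrappers above:
//
//   at::Tensor a = at::arange(3);               // [0, 1, 2]
//   at::Tensor m = at::greater(a, 1);           // [false, false, true]
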
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_out, name, "aten::gather")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_out, schema_str, "gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)")

// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gather_out::schema> create_gather_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather_out::name, gather_out::overload_name)
      .typed<gather_out::schema>();
}

// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gather_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {

    static auto op = create_gather_out_typed_handle();
    return op.call(self, dim, index, sparse_grad, out);
}

// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gather_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {

    static auto op = create_gather_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, sparse_grad, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather, name, "aten::gather")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather, schema_str, "gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor")

// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gather::schema> create_gather_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather::name, gather::overload_name)
      .typed<gather::schema>();
}

// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
at::Tensor gather::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {

    static auto op = create_gather_typed_handle();
    return op.call(self, dim, index, sparse_grad);
}

// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
at::Tensor gather::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {

    static auto op = create_gather_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, sparse_grad);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_backward, name, "aten::gather_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_backward, schema_str, "gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor")

// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gather_backward::schema> create_gather_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather_backward::name, gather_backward::overload_name)
      .typed<gather_backward::schema>();
}

// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
at::Tensor gather_backward::call(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {

    static auto op = create_gather_backward_typed_handle();
    return op.call(grad, self, dim, index, sparse_grad);
}

// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
at::Tensor gather_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {

    static auto op = create_gather_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, dim, index, sparse_grad);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_dimname_out, name, "aten::gather")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_dimname_out, schema_str, "gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)")

// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gather_dimname_out::schema> create_gather_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather_dimname_out::name, gather_dimname_out::overload_name)
      .typed<gather_dimname_out::schema>();
}

// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gather_dimname_out::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {

    static auto op = create_gather_dimname_out_typed_handle();
    return op.call(self, dim, index, sparse_grad, out);
}

// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gather_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {

    static auto op = create_gather_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, sparse_grad, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_dimname, name, "aten::gather")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gather_dimname, schema_str, "gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor")

// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gather_dimname::schema> create_gather_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather_dimname::name, gather_dimname::overload_name)
      .typed<gather_dimname::schema>();
}

// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
at::Tensor gather_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {

    static auto op = create_gather_dimname_typed_handle();
    return op.call(self, dim, index, sparse_grad);
}

// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
at::Tensor gather_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {

    static auto op = create_gather_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, sparse_grad);
}

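// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// gather indexes along a single dimension: for a 2-D input with dim=1,
// out[i][j] = self[i][index[i][j]], where index is a Long tensor with the same
// number of dimensions as self. sparse_grad=True makes the gradient w.r.t.
// self a sparse tensor. Values below are assumptions for the sketch:
//
//   at::Tensor src = at::arange(6).reshape({2, 3});   // [[0, 1, 2], [3, 4, 5]]
//   at::Tensor idx = at::zeros({2, 1}, at::kLong);
//   at::Tensor out = at::gather(src, /*dim=*/1, idx); // [[0], [3]]
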
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross_entropy_loss, name, "aten::cross_entropy_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross_entropy_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross_entropy_loss, schema_str, "cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor")

// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cross_entropy_loss::schema> create_cross_entropy_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cross_entropy_loss::name, cross_entropy_loss::overload_name)
      .typed<cross_entropy_loss::schema>();
}

// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
at::Tensor cross_entropy_loss::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {

    static auto op = create_cross_entropy_loss_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index, label_smoothing);
}

// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
at::Tensor cross_entropy_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {

    static auto op = create_cross_entropy_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, label_smoothing);
}

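// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// cross_entropy_loss is a composite of log_softmax over dim 1 followed by
// nll_loss; targets equal to ignore_index (-100 by default) are skipped, and
// label_smoothing blends the one-hot target with a uniform distribution.
//
//   at::Tensor logits = at::randn({4, 10});
//   at::Tensor target = at::randint(10, {4});          // class indices, kLong
//   at::Tensor loss   = at::cross_entropy_loss(logits, target);
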
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triangular_solve_X, name, "aten::triangular_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triangular_solve_X, overload_name, "X")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triangular_solve_X, schema_str, "triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)")

// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
static C10_NOINLINE c10::TypedOperatorHandle<triangular_solve_X::schema> create_triangular_solve_X_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triangular_solve_X::name, triangular_solve_X::overload_name)
      .typed<triangular_solve_X::schema>();
}

// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_X::call(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {

    static auto op = create_triangular_solve_X_typed_handle();
    return op.call(self, A, upper, transpose, unitriangular, X, M);
}

// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_X::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {

    static auto op = create_triangular_solve_X_typed_handle();
    return op.redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular, X, M);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triangular_solve, name, "aten::triangular_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triangular_solve, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(triangular_solve, schema_str, "triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)")

// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
static C10_NOINLINE c10::TypedOperatorHandle<triangular_solve::schema> create_triangular_solve_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triangular_solve::name, triangular_solve::overload_name)
      .typed<triangular_solve::schema>();
}

// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
::std::tuple<at::Tensor,at::Tensor> triangular_solve::call(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {

    static auto op = create_triangular_solve_typed_handle();
    return op.call(self, A, upper, transpose, unitriangular);
}

// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
::std::tuple<at::Tensor,at::Tensor> triangular_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {

    static auto op = create_triangular_solve_typed_handle();
    return op.redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular);
}

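// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// triangular_solve solves A X = b for triangular A; note the argument order
// (self is b, A comes second) and that cloned_coefficient is just a clone of A.
// The op is deprecated upstream in favor of linalg_solve_triangular below.
//
//   at::Tensor A = at::triu(at::randn({3, 3}));        // upper-triangular (upper=True default)
//   at::Tensor b = at::randn({3, 2});
//   auto [X, A_clone] = at::triangular_solve(b, A);    // C++17 structured bindings
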
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_check_errors, name, "aten::_linalg_check_errors")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_check_errors, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_check_errors, schema_str, "_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()")

// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_check_errors::schema> create__linalg_check_errors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_check_errors::name, _linalg_check_errors::overload_name)
      .typed<_linalg_check_errors::schema>();
}

// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
void _linalg_check_errors::call(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {

    static auto op = create__linalg_check_errors_typed_handle();
    return op.call(info, api_name, is_matrix);
}

// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
void _linalg_check_errors::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & info, c10::string_view api_name, bool is_matrix) {

    static auto op = create__linalg_check_errors_typed_handle();
    return op.redispatch(dispatchKeySet, info, api_name, is_matrix);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_triangular_out, name, "aten::linalg_solve_triangular")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_triangular_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_triangular_out, schema_str, "linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_triangular_out::schema> create_linalg_solve_triangular_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_solve_triangular_out::name, linalg_solve_triangular_out::overload_name)
      .typed<linalg_solve_triangular_out::schema>();
}

// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_solve_triangular_out::call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {

    static auto op = create_linalg_solve_triangular_out_typed_handle();
    return op.call(self, B, upper, left, unitriangular, out);
}

// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_solve_triangular_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {

    static auto op = create_linalg_solve_triangular_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, B, upper, left, unitriangular, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_triangular, name, "aten::linalg_solve_triangular")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_triangular, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_triangular, schema_str, "linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor")

// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_triangular::schema> create_linalg_solve_triangular_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_solve_triangular::name, linalg_solve_triangular::overload_name)
      .typed<linalg_solve_triangular::schema>();
}

// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
at::Tensor linalg_solve_triangular::call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {

    static auto op = create_linalg_solve_triangular_typed_handle();
    return op.call(self, B, upper, left, unitriangular);
}

// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
at::Tensor linalg_solve_triangular::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {

    static auto op = create_linalg_solve_triangular_typed_handle();
    return op.redispatch(dispatchKeySet, self, B, upper, left, unitriangular);
}

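// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// linalg_solve_triangular solves A X = B when left=True (or X A = B when
// left=False), with A (self) triangular; `upper` has no default and must
// always be passed.
//
//   at::Tensor A = at::triu(at::randn({3, 3}));
//   at::Tensor B = at::randn({3, 2});
//   at::Tensor X = at::linalg_solve_triangular(A, B, /*upper=*/true);
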
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ormqr_out, name, "aten::ormqr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ormqr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ormqr_out, schema_str, "ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ormqr_out::schema> create_ormqr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ormqr_out::name, ormqr_out::overload_name)
      .typed<ormqr_out::schema>();
}

// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ormqr_out::call(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {

    static auto op = create_ormqr_out_typed_handle();
    return op.call(self, input2, input3, left, transpose, out);
}

// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ormqr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {

    static auto op = create_ormqr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, input2, input3, left, transpose, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ormqr, name, "aten::ormqr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ormqr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ormqr, schema_str, "ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor")

// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ormqr::schema> create_ormqr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ormqr::name, ormqr::overload_name)
      .typed<ormqr::schema>();
}

// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
at::Tensor ormqr::call(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {

    static auto op = create_ormqr_typed_handle();
    return op.call(self, input2, input3, left, transpose);
}

// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
at::Tensor ormqr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {

    static auto op = create_ormqr_typed_handle();
    return op.redispatch(dispatchKeySet, self, input2, input3, left, transpose);
}

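// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// ormqr multiplies input3 by the orthogonal Q encoded by the Householder
// reflectors (self, input2) that at::geqrf produces, without ever forming Q
// explicitly; left/transpose select Q @ C, Q^T @ C, C @ Q, or C @ Q^T.
//
//   auto [a, tau] = at::geqrf(at::randn({5, 3}));
//   at::Tensor C  = at::randn({5, 4});
//   at::Tensor QC = at::ormqr(a, tau, C);              // Q @ C
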
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0, name, "aten::i0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0, schema_str, "i0(Tensor self) -> Tensor")

// aten::i0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<i0::schema> create_i0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(i0::name, i0::overload_name)
      .typed<i0::schema>();
}

// aten::i0(Tensor self) -> Tensor
at::Tensor i0::call(const at::Tensor & self) {

    static auto op = create_i0_typed_handle();
    return op.call(self);
}

// aten::i0(Tensor self) -> Tensor
at::Tensor i0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_i0_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0_, name, "aten::i0_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0_, schema_str, "i0_(Tensor(a!) self) -> Tensor(a!)")

// aten::i0_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<i0_::schema> create_i0__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(i0_::name, i0_::overload_name)
      .typed<i0_::schema>();
}

// aten::i0_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & i0_::call(at::Tensor & self) {

    static auto op = create_i0__typed_handle();
    return op.call(self);
}

// aten::i0_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & i0_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_i0__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0_out, name, "aten::i0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(i0_out, schema_str, "i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<i0_out::schema> create_i0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(i0_out::name, i0_out::overload_name)
      .typed<i0_out::schema>();
}

// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & i0_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_i0_out_typed_handle();
    return op.call(self, out);
}

// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & i0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_i0_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

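// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// i0 is the elementwise zeroth-order modified Bessel function of the first
// kind, I0(x) = sum_k (x^2 / 4)^k / (k!)^2, so i0(0) == 1:
//
//   at::Tensor ones = at::i0(at::zeros({3}));          // [1., 1., 1.]
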
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign, name, "aten::sign")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign, schema_str, "sign(Tensor self) -> Tensor")

// aten::sign(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sign::schema> create_sign_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sign::name, sign::overload_name)
      .typed<sign::schema>();
}

// aten::sign(Tensor self) -> Tensor
at::Tensor sign::call(const at::Tensor & self) {

    static auto op = create_sign_typed_handle();
    return op.call(self);
}

// aten::sign(Tensor self) -> Tensor
at::Tensor sign::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_sign_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign_, name, "aten::sign_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign_, schema_str, "sign_(Tensor(a!) self) -> Tensor(a!)")

// aten::sign_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sign_::schema> create_sign__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sign_::name, sign_::overload_name)
      .typed<sign_::schema>();
}

// aten::sign_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sign_::call(at::Tensor & self) {

    static auto op = create_sign__typed_handle();
    return op.call(self);
}

// aten::sign_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sign_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_sign__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign_out, name, "aten::sign")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sign_out, schema_str, "sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sign_out::schema> create_sign_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sign_out::name, sign_out::overload_name)
      .typed<sign_out::schema>();
}

// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sign_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sign_out_typed_handle();
    return op.call(self, out);
}

// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sign_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sign_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

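// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// sign maps each element to -1, 0, or +1 (NaN stays NaN); complex tensors use
// aten::sgn instead.
//
//   at::Tensor s = at::sign(at::arange(-2, 3));        // [-1, -1, 0, 1, 1]
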
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Scalar_out, name, "aten::lerp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Scalar_out, schema_str, "lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)")

// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lerp_Scalar_out::schema> create_lerp_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp_Scalar_out::name, lerp_Scalar_out::overload_name)
      .typed<lerp_Scalar_out::schema>();
}

// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lerp_Scalar_out::call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {

    static auto op = create_lerp_Scalar_out_typed_handle();
    return op.call(self, end, weight, out);
}

// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lerp_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {

    static auto op = create_lerp_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Tensor_out, name, "aten::lerp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Tensor_out, schema_str, "lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)")

// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lerp_Tensor_out::schema> create_lerp_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp_Tensor_out::name, lerp_Tensor_out::overload_name)
      .typed<lerp_Tensor_out::schema>();
}

// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lerp_Tensor_out::call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {

    static auto op = create_lerp_Tensor_out_typed_handle();
    return op.call(self, end, weight, out);
}

// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lerp_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {

    static auto op = create_lerp_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Scalar, name, "aten::lerp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Scalar, schema_str, "lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor")

// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lerp_Scalar::schema> create_lerp_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp_Scalar::name, lerp_Scalar::overload_name)
      .typed<lerp_Scalar::schema>();
}

// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
at::Tensor lerp_Scalar::call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {

    static auto op = create_lerp_Scalar_typed_handle();
    return op.call(self, end, weight);
}

// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
at::Tensor lerp_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {

    static auto op = create_lerp_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Tensor, name, "aten::lerp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lerp_Tensor, schema_str, "lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor")

// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lerp_Tensor::schema> create_lerp_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp_Tensor::name, lerp_Tensor::overload_name)
      .typed<lerp_Tensor::schema>();
}

// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
at::Tensor lerp_Tensor::call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {

    static auto op = create_lerp_Tensor_typed_handle();
    return op.call(self, end, weight);
}

// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
at::Tensor lerp_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {

    static auto op = create_lerp_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight);
}

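// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// lerp computes self + weight * (end - self); the weight is either a single
// Scalar applied uniformly or a Tensor applied elementwise.
//
//   at::Tensor a   = at::zeros({4});
//   at::Tensor b   = at::ones({4});
//   at::Tensor mid = at::lerp(a, b, 0.5);              // [0.5, 0.5, 0.5, 0.5]
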
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min, name, "aten::min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min, schema_str, "min(Tensor self) -> Tensor")

// aten::min(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<min::schema> create_min_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min::name, min::overload_name)
      .typed<min::schema>();
}

// aten::min(Tensor self) -> Tensor
at::Tensor min::call(const at::Tensor & self) {

    static auto op = create_min_typed_handle();
    return op.call(self);
}

// aten::min(Tensor self) -> Tensor
at::Tensor min::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_min_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmin, name, "aten::fmin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmin, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmin, schema_str, "fmin(Tensor self, Tensor other) -> Tensor")

// aten::fmin(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fmin::schema> create_fmin_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmin::name, fmin::overload_name)
      .typed<fmin::schema>();
}

// aten::fmin(Tensor self, Tensor other) -> Tensor
at::Tensor fmin::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_fmin_typed_handle();
    return op.call(self, other);
}

// aten::fmin(Tensor self, Tensor other) -> Tensor
at::Tensor fmin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_fmin_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmin_out, name, "aten::fmin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmin_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fmin_out, schema_str, "fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmin_out::schema> create_fmin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmin_out::name, fmin_out::overload_name)
      .typed<fmin_out::schema>();
}

// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmin_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_fmin_out_typed_handle();
    return op.call(self, other, out);
}

// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_fmin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_out, name, "aten::min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_out, schema_str, "min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<min_out::schema> create_min_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_out::name, min_out::overload_name)
      .typed<min_out::schema>();
}

// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & min_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_min_out_typed_handle();
    return op.call(self, other, out);
}

// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & min_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_min_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_other, name, "aten::min")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_other, overload_name, "other")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(min_other, schema_str, "min.other(Tensor self, Tensor other) -> Tensor")

// aten::min.other(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<min_other::schema> create_min_other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_other::name, min_other::overload_name)
      .typed<min_other::schema>();
}

// aten::min.other(Tensor self, Tensor other) -> Tensor
at::Tensor min_other::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_min_other_typed_handle();
    return op.call(self, other);
}

// aten::min.other(Tensor self, Tensor other) -> Tensor
at::Tensor min_other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_min_other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

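// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// min() with one argument is a full reduction; min.other is the elementwise
// minimum and propagates NaN, whereas fmin follows C's fmin and returns the
// non-NaN operand when exactly one input is NaN.
//
//   at::Tensor x  = at::randn({3, 4});
//   at::Tensor lo = at::min(x);                        // 0-dim tensor, global minimum
//   at::Tensor em = at::fmin(x, at::zeros_like(x));    // elementwise, NaN-ignoring
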
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(equal, name, "aten::equal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(equal, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(equal, schema_str, "equal(Tensor self, Tensor other) -> bool")

// aten::equal(Tensor self, Tensor other) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<equal::schema> create_equal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(equal::name, equal::overload_name)
      .typed<equal::schema>();
}

// aten::equal(Tensor self, Tensor other) -> bool
bool equal::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_equal_typed_handle();
    return op.call(self, other);
}

// aten::equal(Tensor self, Tensor other) -> bool
bool equal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_equal_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

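// NOTE: illustrative usage sketch (hand-written, not torchgen output).
// equal collapses the comparison to a single C++ bool: true iff both tensors
// have the same shape and identical elements (contrast elementwise aten::eq,
// which returns a bool Tensor).
//
//   bool same = at::equal(at::ones({2}), at::ones({2}));   // true
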
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_Scalar, name, "aten::_foreach_mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_Scalar, schema_str, "_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")

// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_Scalar::schema> create__foreach_mul_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul_Scalar::name, _foreach_mul_Scalar::overload_name)
      .typed<_foreach_mul_Scalar::schema>();
}

// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_mul_Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_mul_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__Scalar, name, "aten::_foreach_mul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__Scalar, schema_str, "_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")

// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul__Scalar::schema> create__foreach_mul__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul__Scalar::name, _foreach_mul__Scalar::overload_name)
      .typed<_foreach_mul__Scalar::schema>();
}

// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_mul__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_mul__Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_mul__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_mul__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_Scalar, name, "aten::_foreach_div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_Scalar, schema_str, "_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")

// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_Scalar::schema> create__foreach_div_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div_Scalar::name, _foreach_div_Scalar::overload_name)
      .typed<_foreach_div_Scalar::schema>();
}

// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_div_Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_div_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__Scalar, name, "aten::_foreach_div_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__Scalar, schema_str, "_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")

// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div__Scalar::schema> create__foreach_div__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div__Scalar::name, _foreach_div__Scalar::overload_name)
      .typed<_foreach_div__Scalar::schema>();
}

// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_div__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_div__Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_div__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static auto op = create__foreach_div__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

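// NOTE: Hypothetical sketch, not @generated: the _foreach_* ops apply one
// arithmetic op across a whole list of tensors, giving backends a chance to
// fuse the work into fewer kernel launches. The out-of-place Scalar
// overloads above return a new vector of tensors; the trailing-underscore
// variants mutate the input list.
[[maybe_unused]] static ::std::vector<at::Tensor> example_scale_all(at::TensorList params, double factor) {
  // One dispatcher round-trip for the whole list instead of one per tensor.
  return _foreach_mul_Scalar::call(params, factor);
}
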
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_List, name, "aten::_foreach_mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_List, schema_str, "_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]")

// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_List::schema> create__foreach_mul_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul_List::name, _foreach_mul_List::overload_name)
      .typed<_foreach_mul_List::schema>();
}

// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_mul_List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_mul_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__List, name, "aten::_foreach_mul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__List, schema_str, "_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()")

// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul__List::schema> create__foreach_mul__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul__List::name, _foreach_mul__List::overload_name)
      .typed<_foreach_mul__List::schema>();
}

// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_mul__List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_mul__List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_mul__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_mul__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_List, name, "aten::_foreach_div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_List, schema_str, "_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]")

// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_List::schema> create__foreach_div_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div_List::name, _foreach_div_List::overload_name)
      .typed<_foreach_div_List::schema>();
}

// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_div_List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_div_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__List, name, "aten::_foreach_div_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__List, schema_str, "_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()")

// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div__List::schema> create__foreach_div__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div__List::name, _foreach_div__List::overload_name)
      .typed<_foreach_div__List::schema>();
}

// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_div__List::call(at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_div__List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_div__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    static auto op = create__foreach_div__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

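// NOTE: Hypothetical sketch, not @generated: the List overloads pair the
// i-th tensor of `self` with the i-th tensor of `other`; both lists must
// have the same length and elementwise-compatible shapes.
[[maybe_unused]] static void example_divide_in_place(at::TensorList nums, at::TensorList denoms) {
  // Mutates every tensor in `nums`: nums[i] /= denoms[i].
  _foreach_div__List::call(nums, denoms);
}
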
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_ScalarList, name, "aten::_foreach_div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_ScalarList, schema_str, "_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")

// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_ScalarList::schema> create__foreach_div_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div_ScalarList::name, _foreach_div_ScalarList::overload_name)
      .typed<_foreach_div_ScalarList::schema>();
}

// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_div_ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_div_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__ScalarList, name, "aten::_foreach_div_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div__ScalarList, schema_str, "_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")

// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div__ScalarList::schema> create__foreach_div__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div__ScalarList::name, _foreach_div__ScalarList::overload_name)
      .typed<_foreach_div__ScalarList::schema>();
}

// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_div__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_div__ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_div__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_div__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_ScalarList, name, "aten::_foreach_mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_ScalarList, schema_str, "_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")

// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_ScalarList::schema> create__foreach_mul_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul_ScalarList::name, _foreach_mul_ScalarList::overload_name)
      .typed<_foreach_mul_ScalarList::schema>();
}

// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_mul_ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_mul_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__ScalarList, name, "aten::_foreach_mul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul__ScalarList, schema_str, "_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")

// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul__ScalarList::schema> create__foreach_mul__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul__ScalarList::name, _foreach_mul__ScalarList::overload_name)
      .typed<_foreach_mul__ScalarList::schema>();
}

// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_mul__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_mul__ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_mul__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_mul__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

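// NOTE: Hypothetical sketch, not @generated: ScalarList overloads take one
// scalar per tensor, e.g. per-parameter learning rates in an optimizer.
[[maybe_unused]] static void example_per_tensor_scale(at::TensorList params, at::ArrayRef<at::Scalar> factors) {
  // Requires params.size() == factors.size(); params[i] *= factors[i].
  _foreach_mul__ScalarList::call(params, factors);
}
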
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero_, name, "aten::_foreach_zero_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero_, schema_str, "_foreach_zero_(Tensor(a!)[] self) -> ()")

// aten::_foreach_zero_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_zero_::schema> create__foreach_zero__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_zero_::name, _foreach_zero_::overload_name)
      .typed<_foreach_zero_::schema>();
}

// aten::_foreach_zero_(Tensor(a!)[] self) -> ()
void _foreach_zero_::call(at::TensorList self) {
    static auto op = create__foreach_zero__typed_handle();
    return op.call(self);
}

// aten::_foreach_zero_(Tensor(a!)[] self) -> ()
void _foreach_zero_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_zero__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

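// NOTE: Hypothetical sketch, not @generated: _foreach_zero_ is the kind of
// primitive an optimizer's zero_grad(set_to_none=False) can lower to — a
// single fused zeroing pass over all gradient tensors.
[[maybe_unused]] static void example_zero_grads(at::TensorList grads) {
  _foreach_zero_::call(grads);
}
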
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin, name, "aten::_foreach_asin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin, schema_str, "_foreach_asin(Tensor[] self) -> Tensor[]")

// aten::_foreach_asin(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_asin::schema> create__foreach_asin_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_asin::name, _foreach_asin::overload_name)
      .typed<_foreach_asin::schema>();
}

// aten::_foreach_asin(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_asin::call(at::TensorList self) {
    static auto op = create__foreach_asin_typed_handle();
    return op.call(self);
}

// aten::_foreach_asin(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_asin::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_asin_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin_, name, "aten::_foreach_asin_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin_, schema_str, "_foreach_asin_(Tensor(a!)[] self) -> ()")

// aten::_foreach_asin_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_asin_::schema> create__foreach_asin__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_asin_::name, _foreach_asin_::overload_name)
      .typed<_foreach_asin_::schema>();
}

// aten::_foreach_asin_(Tensor(a!)[] self) -> ()
void _foreach_asin_::call(at::TensorList self) {
    static auto op = create__foreach_asin__typed_handle();
    return op.call(self);
}

// aten::_foreach_asin_(Tensor(a!)[] self) -> ()
void _foreach_asin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_asin__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos, name, "aten::_foreach_cos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos, schema_str, "_foreach_cos(Tensor[] self) -> Tensor[]")

// aten::_foreach_cos(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cos::schema> create__foreach_cos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cos::name, _foreach_cos::overload_name)
      .typed<_foreach_cos::schema>();
}

// aten::_foreach_cos(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_cos::call(at::TensorList self) {
    static auto op = create__foreach_cos_typed_handle();
    return op.call(self);
}

// aten::_foreach_cos(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_cos::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_cos_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos_, name, "aten::_foreach_cos_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos_, schema_str, "_foreach_cos_(Tensor(a!)[] self) -> ()")

// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cos_::schema> create__foreach_cos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cos_::name, _foreach_cos_::overload_name)
      .typed<_foreach_cos_::schema>();
}

// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
void _foreach_cos_::call(at::TensorList self) {
    static auto op = create__foreach_cos__typed_handle();
    return op.call(self);
}

// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
void _foreach_cos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_cos__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor, name, "aten::_foreach_floor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor, schema_str, "_foreach_floor(Tensor[] self) -> Tensor[]")

// aten::_foreach_floor(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_floor::schema> create__foreach_floor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_floor::name, _foreach_floor::overload_name)
      .typed<_foreach_floor::schema>();
}

// aten::_foreach_floor(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_floor::call(at::TensorList self) {
    static auto op = create__foreach_floor_typed_handle();
    return op.call(self);
}

// aten::_foreach_floor(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_floor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_floor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor_, name, "aten::_foreach_floor_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor_, schema_str, "_foreach_floor_(Tensor(a!)[] self) -> ()")

// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_floor_::schema> create__foreach_floor__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_floor_::name, _foreach_floor_::overload_name)
      .typed<_foreach_floor_::schema>();
}

// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
void _foreach_floor_::call(at::TensorList self) {
    static auto op = create__foreach_floor__typed_handle();
    return op.call(self);
}

// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
void _foreach_floor_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_floor__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh, name, "aten::_foreach_tanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh, schema_str, "_foreach_tanh(Tensor[] self) -> Tensor[]")

// aten::_foreach_tanh(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tanh::schema> create__foreach_tanh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_tanh::name, _foreach_tanh::overload_name)
      .typed<_foreach_tanh::schema>();
}

// aten::_foreach_tanh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_tanh::call(at::TensorList self) {
    static auto op = create__foreach_tanh_typed_handle();
    return op.call(self);
}

// aten::_foreach_tanh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_tanh::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_tanh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh_, name, "aten::_foreach_tanh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh_, schema_str, "_foreach_tanh_(Tensor(a!)[] self) -> ()")

// aten::_foreach_tanh_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tanh_::schema> create__foreach_tanh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_tanh_::name, _foreach_tanh_::overload_name)
      .typed<_foreach_tanh_::schema>();
}

// aten::_foreach_tanh_(Tensor(a!)[] self) -> ()
void _foreach_tanh_::call(at::TensorList self) {
    static auto op = create__foreach_tanh__typed_handle();
    return op.call(self);
}

// aten::_foreach_tanh_(Tensor(a!)[] self) -> ()
void _foreach_tanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static auto op = create__foreach_tanh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

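// NOTE: Hypothetical sketch, not @generated: the unary _foreach_* ops above
// (asin, cos, floor, tanh) all share one schema shape — Tensor[] in,
// Tensor[] out — plus an in-place variant returning ().
[[maybe_unused]] static ::std::vector<at::Tensor> example_activations(at::TensorList preacts) {
  // Returns new tensors; `preacts` is left untouched.
  return _foreach_tanh::call(preacts);
}
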
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__Scalar, name, "aten::_foreach_addcmul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__Scalar, schema_str, "_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()")

// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul__Scalar::schema> create__foreach_addcmul__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul__Scalar::name, _foreach_addcmul__Scalar::overload_name)
      .typed<_foreach_addcmul__Scalar::schema>();
}

// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
void _foreach_addcmul__Scalar::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    static auto op = create__foreach_addcmul__Scalar_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
void _foreach_addcmul__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    static auto op = create__foreach_addcmul__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__ScalarList, name, "aten::_foreach_addcmul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__ScalarList, schema_str, "_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()")

// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul__ScalarList::schema> create__foreach_addcmul__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul__ScalarList::name, _foreach_addcmul__ScalarList::overload_name)
      .typed<_foreach_addcmul__ScalarList::schema>();
}

// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
void _foreach_addcmul__ScalarList::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_addcmul__ScalarList_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
void _foreach_addcmul__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_addcmul__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__Tensor, name, "aten::_foreach_addcmul_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul__Tensor, schema_str, "_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()")

// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul__Tensor::schema> create__foreach_addcmul__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul__Tensor::name, _foreach_addcmul__Tensor::overload_name)
      .typed<_foreach_addcmul__Tensor::schema>();
}

// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
void _foreach_addcmul__Tensor::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    static auto op = create__foreach_addcmul__Tensor_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
void _foreach_addcmul__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    static auto op = create__foreach_addcmul__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Scalar, name, "aten::_foreach_addcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Scalar, schema_str, "_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]")

// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_Scalar::schema> create__foreach_addcmul_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_Scalar::name, _foreach_addcmul_Scalar::overload_name)
      .typed<_foreach_addcmul_Scalar::schema>();
}

// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_Scalar::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    static auto op = create__foreach_addcmul_Scalar_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    static auto op = create__foreach_addcmul_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_ScalarList, name, "aten::_foreach_addcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_ScalarList, schema_str, "_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]")

// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_ScalarList::schema> create__foreach_addcmul_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_ScalarList::name, _foreach_addcmul_ScalarList::overload_name)
      .typed<_foreach_addcmul_ScalarList::schema>();
}

// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_ScalarList::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_addcmul_ScalarList_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    static auto op = create__foreach_addcmul_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Tensor, name, "aten::_foreach_addcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Tensor, schema_str, "_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]")

// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_Tensor::schema> create__foreach_addcmul_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_Tensor::name, _foreach_addcmul_Tensor::overload_name)
      .typed<_foreach_addcmul_Tensor::schema>();
}

// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_Tensor::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    static auto op = create__foreach_addcmul_Tensor_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    static auto op = create__foreach_addcmul_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

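// NOTE: Hypothetical sketch, not @generated: addcmul fuses a multiply-add,
// out[i] = self[i] + value * tensor1[i] * tensor2[i]; the Scalar, ScalarList,
// and Tensor overloads differ only in how `value` is supplied.
[[maybe_unused]] static void example_fused_update(at::TensorList params, at::TensorList grads, at::TensorList scales, double lr) {
  // In-place: params[i] += (-lr) * grads[i] * scales[i].
  _foreach_addcmul__Scalar::call(params, grads, scales, -lr);
}
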
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convert_indices_from_csr_to_coo, name, "aten::_convert_indices_from_csr_to_coo")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convert_indices_from_csr_to_coo, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convert_indices_from_csr_to_coo, schema_str, "_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor")

// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_convert_indices_from_csr_to_coo::schema> create__convert_indices_from_csr_to_coo_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convert_indices_from_csr_to_coo::name, _convert_indices_from_csr_to_coo::overload_name)
      .typed<_convert_indices_from_csr_to_coo::schema>();
}

// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
at::Tensor _convert_indices_from_csr_to_coo::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
    static auto op = create__convert_indices_from_csr_to_coo_typed_handle();
    return op.call(crow_indices, col_indices, out_int32, transpose);
}

// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
at::Tensor _convert_indices_from_csr_to_coo::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
    static auto op = create__convert_indices_from_csr_to_coo_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convert_indices_from_csr_to_coo_out, name, "aten::_convert_indices_from_csr_to_coo")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convert_indices_from_csr_to_coo_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_convert_indices_from_csr_to_coo_out, schema_str, "_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)")

// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_convert_indices_from_csr_to_coo_out::schema> create__convert_indices_from_csr_to_coo_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convert_indices_from_csr_to_coo_out::name, _convert_indices_from_csr_to_coo_out::overload_name)
      .typed<_convert_indices_from_csr_to_coo_out::schema>();
}

// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convert_indices_from_csr_to_coo_out::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
    static auto op = create__convert_indices_from_csr_to_coo_out_typed_handle();
    return op.call(crow_indices, col_indices, out_int32, transpose, out);
}

// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convert_indices_from_csr_to_coo_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
    static auto op = create__convert_indices_from_csr_to_coo_out_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose, out);
}

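// NOTE: Hypothetical sketch, not @generated: this op expands CSR's
// compressed row pointer (length nrows + 1) into explicit per-element row
// indices, producing the 2 x nnz COO index matrix; out_int32 selects int32
// output indices and transpose swaps the index rows for CSC-style input.
[[maybe_unused]] static at::Tensor example_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices) {
  return _convert_indices_from_csr_to_coo::call(crow_indices, col_indices, /*out_int32=*/false, /*transpose=*/false);
}
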
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_out, name, "aten::nll_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_out, schema_str, "nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)")

// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_out::schema> create_nll_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_out::name, nll_loss_out::overload_name)
      .typed<nll_loss_out::schema>();
}

// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nll_loss_out::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    static auto op = create_nll_loss_out_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index, out);
}

// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nll_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    static auto op = create_nll_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss, name, "aten::nll_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss, schema_str, "nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor")

// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss::schema> create_nll_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss::name, nll_loss::overload_name)
      .typed<nll_loss::schema>();
}

// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
at::Tensor nll_loss::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    static auto op = create_nll_loss_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
at::Tensor nll_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    static auto op = create_nll_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_backward_grad_input, name, "aten::nll_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_backward_grad_input, schema_str, "nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_backward_grad_input::schema> create_nll_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_backward_grad_input::name, nll_loss_backward_grad_input::overload_name)
      .typed<nll_loss_backward_grad_input::schema>();
}

// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & nll_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
    static auto op = create_nll_loss_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}

// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & nll_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
    static auto op = create_nll_loss_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_backward, name, "aten::nll_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_backward, schema_str, "nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor")

// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_backward::schema> create_nll_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_backward::name, nll_loss_backward::overload_name)
      .typed<nll_loss_backward::schema>();
}

// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
at::Tensor nll_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
    static auto op = create_nll_loss_backward_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}

// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
at::Tensor nll_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
    static auto op = create_nll_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight);
}

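// NOTE: Hypothetical sketch, not @generated: nll_loss expects
// log-probabilities in `self` and class indices in `target`; `reduction`
// takes the at::Reduction enum values (0 = None, 1 = Mean, 2 = Sum), and
// targets equal to ignore_index contribute neither loss nor gradient.
[[maybe_unused]] static at::Tensor example_nll(const at::Tensor & log_probs, const at::Tensor & target) {
  // No class weights; 1 == Mean reduction; -100 is the schema default.
  return nll_loss::call(log_probs, target, /*weight=*/{}, /*reduction=*/1, /*ignore_index=*/-100);
}
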
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_backward_grad_input, name, "aten::smooth_l1_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_backward_grad_input, schema_str, "smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<smooth_l1_loss_backward_grad_input::schema> create_smooth_l1_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smooth_l1_loss_backward_grad_input::name, smooth_l1_loss_backward_grad_input::overload_name)
      .typed<smooth_l1_loss_backward_grad_input::schema>();
}

// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & smooth_l1_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
    static auto op = create_smooth_l1_loss_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, reduction, beta, grad_input);
}

// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & smooth_l1_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
    static auto op = create_smooth_l1_loss_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, beta, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_backward, name, "aten::smooth_l1_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(smooth_l1_loss_backward, schema_str, "smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor")

// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<smooth_l1_loss_backward::schema> create_smooth_l1_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smooth_l1_loss_backward::name, smooth_l1_loss_backward::overload_name)
      .typed<smooth_l1_loss_backward::schema>();
}

// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
at::Tensor smooth_l1_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
    static auto op = create_smooth_l1_loss_backward_typed_handle();
    return op.call(grad_output, self, target, reduction, beta);
}

// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
at::Tensor smooth_l1_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
    static auto op = create_smooth_l1_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, beta);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_out, name, "aten::huber_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_out, schema_str, "huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<huber_loss_out::schema> create_huber_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(huber_loss_out::name, huber_loss_out::overload_name)
      .typed<huber_loss_out::schema>();
}

// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & huber_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
    static auto op = create_huber_loss_out_typed_handle();
    return op.call(self, target, reduction, delta, out);
}

// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & huber_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
    static auto op = create_huber_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, delta, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss, name, "aten::huber_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss, schema_str, "huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor")

// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<huber_loss::schema> create_huber_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(huber_loss::name, huber_loss::overload_name)
      .typed<huber_loss::schema>();
}

// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
at::Tensor huber_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
    static auto op = create_huber_loss_typed_handle();
    return op.call(self, target, reduction, delta);
}

// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
at::Tensor huber_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
    static auto op = create_huber_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, delta);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_backward_out, name, "aten::huber_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_backward_out, schema_str, "huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<huber_loss_backward_out::schema> create_huber_loss_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(huber_loss_backward_out::name, huber_loss_backward_out::overload_name)
      .typed<huber_loss_backward_out::schema>();
}

// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & huber_loss_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
    static auto op = create_huber_loss_backward_out_typed_handle();
    return op.call(grad_output, self, target, reduction, delta, grad_input);
}

// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & huber_loss_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
    static auto op = create_huber_loss_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, delta, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_backward, name, "aten::huber_loss_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(huber_loss_backward, schema_str, "huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor")

// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<huber_loss_backward::schema> create_huber_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(huber_loss_backward::name, huber_loss_backward::overload_name)
      .typed<huber_loss_backward::schema>();
}

// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
at::Tensor huber_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
    static auto op = create_huber_loss_backward_typed_handle();
    return op.call(grad_output, self, target, reduction, delta);
}

// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
at::Tensor huber_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
    static auto op = create_huber_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, delta);
}

8930STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_out, name, "aten::hardsigmoid")
8931STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_out, overload_name, "out")
8932STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_out, schema_str, "hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
8933
8934// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
8935static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid_out::schema> create_hardsigmoid_out_typed_handle() {
8936 return c10::Dispatcher::singleton()
8937 .findSchemaOrThrow(hardsigmoid_out::name, hardsigmoid_out::overload_name)
8938 .typed<hardsigmoid_out::schema>();
8939}
8940
8941// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
8942at::Tensor & hardsigmoid_out::call(const at::Tensor & self, at::Tensor & out) {
8943
8944 static auto op = create_hardsigmoid_out_typed_handle();
8945 return op.call(self, out);
8946}
8947
8948// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
8949at::Tensor & hardsigmoid_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
8950
8951 static auto op = create_hardsigmoid_out_typed_handle();
8952 return op.redispatch(dispatchKeySet, self, out);
8953}
8954
8955STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid, name, "aten::hardsigmoid")
8956STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid, overload_name, "")
8957STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid, schema_str, "hardsigmoid(Tensor self) -> Tensor")
8958
8959// aten::hardsigmoid(Tensor self) -> Tensor
8960static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid::schema> create_hardsigmoid_typed_handle() {
8961 return c10::Dispatcher::singleton()
8962 .findSchemaOrThrow(hardsigmoid::name, hardsigmoid::overload_name)
8963 .typed<hardsigmoid::schema>();
8964}
8965
8966// aten::hardsigmoid(Tensor self) -> Tensor
8967at::Tensor hardsigmoid::call(const at::Tensor & self) {
8968
8969 static auto op = create_hardsigmoid_typed_handle();
8970 return op.call(self);
8971}
8972
8973// aten::hardsigmoid(Tensor self) -> Tensor
8974at::Tensor hardsigmoid::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
8975
8976 static auto op = create_hardsigmoid_typed_handle();
8977 return op.redispatch(dispatchKeySet, self);
8978}
8979
8980STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_, name, "aten::hardsigmoid_")
8981STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_, overload_name, "")
8982STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardsigmoid_, schema_str, "hardsigmoid_(Tensor(a!) self) -> Tensor(a!)")
8983
8984// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
8985static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid_::schema> create_hardsigmoid__typed_handle() {
8986 return c10::Dispatcher::singleton()
8987 .findSchemaOrThrow(hardsigmoid_::name, hardsigmoid_::overload_name)
8988 .typed<hardsigmoid_::schema>();
8989}
8990
8991// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
8992at::Tensor & hardsigmoid_::call(at::Tensor & self) {
8993
8994 static auto op = create_hardsigmoid__typed_handle();
8995 return op.call(self);
8996}
8997
8998// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
8999at::Tensor & hardsigmoid_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
9000
9001 static auto op = create_hardsigmoid__typed_handle();
9002 return op.redispatch(dispatchKeySet, self);
9003}
9004
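// NOTE (editorial, not @generated): trailing-underscore operators such as
// hardsigmoid_ mutate `self` in place, as declared by the Tensor(a!) alias
// annotation in the schema, and return a reference to that same tensor.
// Sketch, assuming a floating-point tensor `t`:
//
//   hardsigmoid_::call(t);  // t now holds hardsigmoid(t); the returned
//                           // at::Tensor& aliases t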
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_out, name, "aten::log_sigmoid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_out, schema_str, "log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_out::schema> create_log_sigmoid_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_out::name, log_sigmoid_out::overload_name)
      .typed<log_sigmoid_out::schema>();
}

// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_sigmoid_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log_sigmoid_out_typed_handle();
    return op.call(self, out);
}

// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_sigmoid_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log_sigmoid_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid, name, "aten::log_sigmoid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid, schema_str, "log_sigmoid(Tensor self) -> Tensor")

// aten::log_sigmoid(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid::schema> create_log_sigmoid_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid::name, log_sigmoid::overload_name)
      .typed<log_sigmoid::schema>();
}

// aten::log_sigmoid(Tensor self) -> Tensor
at::Tensor log_sigmoid::call(const at::Tensor & self) {
    
    static auto op = create_log_sigmoid_typed_handle();
    return op.call(self);
}

// aten::log_sigmoid(Tensor self) -> Tensor
at::Tensor log_sigmoid::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log_sigmoid_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool2d_out, name, "aten::adaptive_avg_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool2d_out, schema_str, "adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool2d_out::schema> create_adaptive_avg_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool2d_out::name, adaptive_avg_pool2d_out::overload_name)
      .typed<adaptive_avg_pool2d_out::schema>();
}

// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & adaptive_avg_pool2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool2d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & adaptive_avg_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool2d, name, "aten::adaptive_avg_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool2d, schema_str, "adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor")

// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool2d::schema> create_adaptive_avg_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool2d::name, adaptive_avg_pool2d::overload_name)
      .typed<adaptive_avg_pool2d::schema>();
}

// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
at::Tensor adaptive_avg_pool2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool2d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
at::Tensor adaptive_avg_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

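// NOTE (editorial, not @generated): schema arguments typed SymInt[n] lower to
// c10::SymIntArrayRef in these signatures, so shapes can stay symbolic under
// tracing and torch.compile. Concrete sizes still work because c10::SymInt is
// implicitly constructible from int64_t. Sketch, assuming a 4-D NCHW tensor `x`:
//
//   at::Tensor y = adaptive_avg_pool2d::call(x, {7, 7});  // pool to a 7x7 plane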
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d_out, name, "aten::adaptive_avg_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d_out, schema_str, "adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool3d_out::schema> create_adaptive_avg_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool3d_out::name, adaptive_avg_pool3d_out::overload_name)
      .typed<adaptive_avg_pool3d_out::schema>();
}

// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & adaptive_avg_pool3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool3d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & adaptive_avg_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d, name, "aten::adaptive_avg_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d, schema_str, "adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor")

// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool3d::schema> create_adaptive_avg_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool3d::name, adaptive_avg_pool3d::overload_name)
      .typed<adaptive_avg_pool3d::schema>();
}

// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
at::Tensor adaptive_avg_pool3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool3d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
at::Tensor adaptive_avg_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d, name, "aten::_adaptive_avg_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d, schema_str, "_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor")

// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool3d::schema> create__adaptive_avg_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool3d::name, _adaptive_avg_pool3d::overload_name)
      .typed<_adaptive_avg_pool3d::schema>();
}

// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
at::Tensor _adaptive_avg_pool3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create__adaptive_avg_pool3d_typed_handle();
    return op.call(self, output_size);
}

// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
at::Tensor _adaptive_avg_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create__adaptive_avg_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_out, name, "aten::adaptive_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_out, schema_str, "adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")

// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool2d_out::schema> create_adaptive_max_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool2d_out::name, adaptive_max_pool2d_out::overload_name)
      .typed<adaptive_max_pool2d_out::schema>();
}

// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out::call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool2d_out_typed_handle();
    return op.call(self, output_size, out, indices);
}

// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d, name, "aten::adaptive_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d, schema_str, "adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)")

// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool2d::schema> create_adaptive_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool2d::name, adaptive_max_pool2d::overload_name)
      .typed<adaptive_max_pool2d::schema>();
}

// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool2d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

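// NOTE (editorial, not @generated): operators whose schema returns
// (Tensor, Tensor) come back as a ::std::tuple, which unpacks cleanly with
// C++17 structured bindings. Sketch, assuming a 4-D tensor `x`:
//
//   auto [out, indices] = adaptive_max_pool2d::call(x, {2, 2});
//   // `indices` records the argmax locations used by max_unpool2d below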
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_out, name, "aten::adaptive_max_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_out, schema_str, "adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")

// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool3d_out::schema> create_adaptive_max_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool3d_out::name, adaptive_max_pool3d_out::overload_name)
      .typed<adaptive_max_pool3d_out::schema>();
}

// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out::call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool3d_out_typed_handle();
    return op.call(self, output_size, out, indices);
}

// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d, name, "aten::adaptive_max_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d, schema_str, "adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)")

// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool3d::schema> create_adaptive_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool3d::name, adaptive_max_pool3d::overload_name)
      .typed<adaptive_max_pool3d::schema>();
}

// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool3d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool2d_backward_grad_input, name, "aten::avg_pool2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool2d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool2d_backward_grad_input, schema_str, "avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool2d_backward_grad_input::schema> create_avg_pool2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool2d_backward_grad_input::name, avg_pool2d_backward_grad_input::overload_name)
      .typed<avg_pool2d_backward_grad_input::schema>();
}

// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & avg_pool2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
    
    static auto op = create_avg_pool2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}

// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & avg_pool2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
    
    static auto op = create_avg_pool2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool2d_backward, name, "aten::avg_pool2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool2d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(avg_pool2d_backward, schema_str, "avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor")

// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool2d_backward::schema> create_avg_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool2d_backward::name, avg_pool2d_backward::overload_name)
      .typed<avg_pool2d_backward::schema>();
}

// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
at::Tensor avg_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool2d_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
at::Tensor avg_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool2d_output, name, "aten::fractional_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool2d_output, overload_name, "output")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool2d_output, schema_str, "fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")

// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool2d_output::schema> create_fractional_max_pool2d_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool2d_output::name, fractional_max_pool2d_output::overload_name)
      .typed<fractional_max_pool2d_output::schema>();
}

// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_output::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool2d_output_typed_handle();
    return op.call(self, kernel_size, output_size, random_samples, output, indices);
}

// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool2d_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool2d, name, "aten::fractional_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool2d, schema_str, "fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)")

// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool2d::schema> create_fractional_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool2d::name, fractional_max_pool2d::overload_name)
      .typed<fractional_max_pool2d::schema>();
}

// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
    
    static auto op = create_fractional_max_pool2d_typed_handle();
    return op.call(self, kernel_size, output_size, random_samples);
}

// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
    
    static auto op = create_fractional_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool2d_out, name, "aten::max_unpool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool2d_out, schema_str, "max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<max_unpool2d_out::schema> create_max_unpool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unpool2d_out::name, max_unpool2d_out::overload_name)
      .typed<max_unpool2d_out::schema>();
}

// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_unpool2d_out::call(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_max_unpool2d_out_typed_handle();
    return op.call(self, indices, output_size, out);
}

// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_unpool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_max_unpool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, output_size, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool2d, name, "aten::max_unpool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool2d, schema_str, "max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor")

// aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max_unpool2d::schema> create_max_unpool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unpool2d::name, max_unpool2d::overload_name)
      .typed<max_unpool2d::schema>();
}

// aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
at::Tensor max_unpool2d::call(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
    
    static auto op = create_max_unpool2d_typed_handle();
    return op.call(self, indices, output_size);
}

// aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
at::Tensor max_unpool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
    
    static auto op = create_max_unpool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, output_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool3d_out, name, "aten::max_unpool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool3d_out, schema_str, "max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)")

// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<max_unpool3d_out::schema> create_max_unpool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unpool3d_out::name, max_unpool3d_out::overload_name)
      .typed<max_unpool3d_out::schema>();
}

// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_unpool3d_out::call(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_max_unpool3d_out_typed_handle();
    return op.call(self, indices, output_size, stride, padding, out);
}

// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_unpool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_max_unpool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, output_size, stride, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool3d, name, "aten::max_unpool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(max_unpool3d, schema_str, "max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor")

// aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max_unpool3d::schema> create_max_unpool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unpool3d::name, max_unpool3d::overload_name)
      .typed<max_unpool3d::schema>();
}

// aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
at::Tensor max_unpool3d::call(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
    
    static auto op = create_max_unpool3d_typed_handle();
    return op.call(self, indices, output_size, stride, padding);
}

// aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
at::Tensor max_unpool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
    
    static auto op = create_max_unpool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, output_size, stride, padding);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_backward_grad_input, name, "aten::reflection_pad3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_backward_grad_input, schema_str, "reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad3d_backward_grad_input::schema> create_reflection_pad3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad3d_backward_grad_input::name, reflection_pad3d_backward_grad_input::overload_name)
      .typed<reflection_pad3d_backward_grad_input::schema>();
}

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & reflection_pad3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_reflection_pad3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & reflection_pad3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_reflection_pad3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_backward, name, "aten::reflection_pad3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_backward, schema_str, "reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor")

// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad3d_backward::schema> create_reflection_pad3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad3d_backward::name, reflection_pad3d_backward::overload_name)
      .typed<reflection_pad3d_backward::schema>();
}

// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
at::Tensor reflection_pad3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad3d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
at::Tensor reflection_pad3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_backward_grad_input, name, "aten::replication_pad2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_backward_grad_input, schema_str, "replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad2d_backward_grad_input::schema> create_replication_pad2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad2d_backward_grad_input::name, replication_pad2d_backward_grad_input::overload_name)
      .typed<replication_pad2d_backward_grad_input::schema>();
}

// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_replication_pad2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_replication_pad2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_backward, name, "aten::replication_pad2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_backward, schema_str, "replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor")

// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad2d_backward::schema> create_replication_pad2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad2d_backward::name, replication_pad2d_backward::overload_name)
      .typed<replication_pad2d_backward::schema>();
}

// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
at::Tensor replication_pad2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_replication_pad2d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
at::Tensor replication_pad2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_replication_pad2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad3d_out, name, "aten::replication_pad3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad3d_out, schema_str, "replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)")

// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad3d_out::schema> create_replication_pad3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad3d_out::name, replication_pad3d_out::overload_name)
      .typed<replication_pad3d_out::schema>();
}

// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad3d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_replication_pad3d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_replication_pad3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad3d, name, "aten::replication_pad3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad3d, schema_str, "replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor")

// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad3d::schema> create_replication_pad3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad3d::name, replication_pad3d::overload_name)
      .typed<replication_pad3d::schema>();
}

// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
at::Tensor replication_pad3d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_replication_pad3d_typed_handle();
    return op.call(self, padding);
}

// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
at::Tensor replication_pad3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_replication_pad3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_vec, name, "aten::upsample_linear1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_vec, overload_name, "vec")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_vec, schema_str, "upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")

// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d_vec::schema> create_upsample_linear1d_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_linear1d_vec::name, upsample_linear1d_vec::overload_name)
      .typed<upsample_linear1d_vec::schema>();
}

// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_linear1d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    
    static auto op = create_upsample_linear1d_vec_typed_handle();
    return op.call(input, output_size, align_corners, scale_factors);
}

// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_linear1d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    
    static auto op = create_upsample_linear1d_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

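// NOTE (editorial, not @generated): the .vec overloads below are the entry points
// used by torch.nn.functional.interpolate. Both `output_size` and `scale_factors`
// are optional in the schema; callers are expected to supply exactly one of them.
// Sketch, assuming a 3-D tensor `x` and an at::OptionalSymIntArrayRef
// `output_size` holding {16}:
//
//   at::Tensor y = upsample_linear1d_vec::call(
//       x, output_size, /*align_corners=*/false, /*scale_factors=*/c10::nullopt);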
9655STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d_vec, name, "aten::upsample_bilinear2d")
9656STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d_vec, overload_name, "vec")
9657STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d_vec, schema_str, "upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")
9658
9659// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
9660static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_vec::schema> create_upsample_bilinear2d_vec_typed_handle() {
9661 return c10::Dispatcher::singleton()
9662 .findSchemaOrThrow(upsample_bilinear2d_vec::name, upsample_bilinear2d_vec::overload_name)
9663 .typed<upsample_bilinear2d_vec::schema>();
9664}
9665
9666// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
9667at::Tensor upsample_bilinear2d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
9668
9669 static auto op = create_upsample_bilinear2d_vec_typed_handle();
9670 return op.call(input, output_size, align_corners, scale_factors);
9671}
9672
9673// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
9674at::Tensor upsample_bilinear2d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
9675
9676 static auto op = create_upsample_bilinear2d_vec_typed_handle();
9677 return op.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
9678}
9679
9680STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_vec, name, "aten::upsample_bicubic2d")
9681STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_vec, overload_name, "vec")
9682STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_vec, schema_str, "upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")
9683
9684// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
9685static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d_vec::schema> create_upsample_bicubic2d_vec_typed_handle() {
9686 return c10::Dispatcher::singleton()
9687 .findSchemaOrThrow(upsample_bicubic2d_vec::name, upsample_bicubic2d_vec::overload_name)
9688 .typed<upsample_bicubic2d_vec::schema>();
9689}
9690
9691// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
9692at::Tensor upsample_bicubic2d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
9693
9694 static auto op = create_upsample_bicubic2d_vec_typed_handle();
9695 return op.call(input, output_size, align_corners, scale_factors);
9696}
9697
9698// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
9699at::Tensor upsample_bicubic2d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
9700
9701 static auto op = create_upsample_bicubic2d_vec_typed_handle();
9702 return op.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
9703}
9704
9705STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_vec, name, "aten::upsample_nearest2d")
9706STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_vec, overload_name, "vec")
9707STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_vec, schema_str, "upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")
9708
9709// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
9710static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_vec::schema> create_upsample_nearest2d_vec_typed_handle() {
9711 return c10::Dispatcher::singleton()
9712 .findSchemaOrThrow(upsample_nearest2d_vec::name, upsample_nearest2d_vec::overload_name)
9713 .typed<upsample_nearest2d_vec::schema>();
9714}
9715
9716// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
9717at::Tensor upsample_nearest2d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
9718
9719 static auto op = create_upsample_nearest2d_vec_typed_handle();
9720 return op.call(input, output_size, scale_factors);
9721}
9722
9723// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
9724at::Tensor upsample_nearest2d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
9725
9726 static auto op = create_upsample_nearest2d_vec_typed_handle();
9727 return op.redispatch(dispatchKeySet, input, output_size, scale_factors);
9728}
9729
9730STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_out, name, "aten::upsample_linear1d")
9731STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_out, overload_name, "out")
9732STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d_out, schema_str, "upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)")
9733
9734// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
9735static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d_out::schema> create_upsample_linear1d_out_typed_handle() {
9736 return c10::Dispatcher::singleton()
9737 .findSchemaOrThrow(upsample_linear1d_out::name, upsample_linear1d_out::overload_name)
9738 .typed<upsample_linear1d_out::schema>();
9739}
9740
9741// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
9742at::Tensor & upsample_linear1d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
9743
9744 static auto op = create_upsample_linear1d_out_typed_handle();
9745 return op.call(self, output_size, align_corners, scales, out);
9746}
9747
9748// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
9749at::Tensor & upsample_linear1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
9750
9751 static auto op = create_upsample_linear1d_out_typed_handle();
9752 return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales, out);
9753}
9754
9755STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d, name, "aten::upsample_linear1d")
9756STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d, overload_name, "")
9757STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_linear1d, schema_str, "upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor")
9758
9759// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
9760static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d::schema> create_upsample_linear1d_typed_handle() {
9761 return c10::Dispatcher::singleton()
9762 .findSchemaOrThrow(upsample_linear1d::name, upsample_linear1d::overload_name)
9763 .typed<upsample_linear1d::schema>();
9764}
9765
9766// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
9767at::Tensor upsample_linear1d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
9768
9769 static auto op = create_upsample_linear1d_typed_handle();
9770 return op.call(self, output_size, align_corners, scales);
9771}
9772
9773// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
9774at::Tensor upsample_linear1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
9775
9776 static auto op = create_upsample_linear1d_typed_handle();
9777 return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales);
9778}
9779
9780STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d_out, name, "aten::upsample_bilinear2d")
9781STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d_out, overload_name, "out")
9782STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d_out, schema_str, "upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
9783
9784// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9785static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_out::schema> create_upsample_bilinear2d_out_typed_handle() {
9786 return c10::Dispatcher::singleton()
9787 .findSchemaOrThrow(upsample_bilinear2d_out::name, upsample_bilinear2d_out::overload_name)
9788 .typed<upsample_bilinear2d_out::schema>();
9789}
9790
9791// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9792at::Tensor & upsample_bilinear2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
9793
9794 static auto op = create_upsample_bilinear2d_out_typed_handle();
9795 return op.call(self, output_size, align_corners, scales_h, scales_w, out);
9796}
9797
9798// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9799at::Tensor & upsample_bilinear2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
9800
9801 static auto op = create_upsample_bilinear2d_out_typed_handle();
9802 return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
9803}
9804
9805STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d, name, "aten::upsample_bilinear2d")
9806STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d, overload_name, "")
9807STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bilinear2d, schema_str, "upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")
9808
9809// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9810static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d::schema> create_upsample_bilinear2d_typed_handle() {
9811 return c10::Dispatcher::singleton()
9812 .findSchemaOrThrow(upsample_bilinear2d::name, upsample_bilinear2d::overload_name)
9813 .typed<upsample_bilinear2d::schema>();
9814}
9815
9816// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9817at::Tensor upsample_bilinear2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
9818
9819 static auto op = create_upsample_bilinear2d_typed_handle();
9820 return op.call(self, output_size, align_corners, scales_h, scales_w);
9821}
9822
9823// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9824at::Tensor upsample_bilinear2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
9825
9826 static auto op = create_upsample_bilinear2d_typed_handle();
9827 return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
9828}
9829
9830STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_out, name, "aten::upsample_bicubic2d")
9831STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_out, overload_name, "out")
9832STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_out, schema_str, "upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
9833
9834// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9835static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d_out::schema> create_upsample_bicubic2d_out_typed_handle() {
9836 return c10::Dispatcher::singleton()
9837 .findSchemaOrThrow(upsample_bicubic2d_out::name, upsample_bicubic2d_out::overload_name)
9838 .typed<upsample_bicubic2d_out::schema>();
9839}
9840
9841// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9842at::Tensor & upsample_bicubic2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
9843
9844 static auto op = create_upsample_bicubic2d_out_typed_handle();
9845 return op.call(self, output_size, align_corners, scales_h, scales_w, out);
9846}
9847
9848// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9849at::Tensor & upsample_bicubic2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
9850
9851 static auto op = create_upsample_bicubic2d_out_typed_handle();
9852 return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
9853}
9854
9855STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d, name, "aten::upsample_bicubic2d")
9856STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d, overload_name, "")
9857STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d, schema_str, "upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")
9858
9859// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9860static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d::schema> create_upsample_bicubic2d_typed_handle() {
9861 return c10::Dispatcher::singleton()
9862 .findSchemaOrThrow(upsample_bicubic2d::name, upsample_bicubic2d::overload_name)
9863 .typed<upsample_bicubic2d::schema>();
9864}
9865
9866// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9867at::Tensor upsample_bicubic2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
9868
9869 static auto op = create_upsample_bicubic2d_typed_handle();
9870 return op.call(self, output_size, align_corners, scales_h, scales_w);
9871}
9872
9873// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9874at::Tensor upsample_bicubic2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
9875
9876 static auto op = create_upsample_bicubic2d_typed_handle();
9877 return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
9878}
9879
9880STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_backward_grad_input, name, "aten::upsample_bicubic2d_backward")
9881STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_backward_grad_input, overload_name, "grad_input")
9882STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_backward_grad_input, schema_str, "upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
9883
9884// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
9885static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d_backward_grad_input::schema> create_upsample_bicubic2d_backward_grad_input_typed_handle() {
9886 return c10::Dispatcher::singleton()
9887 .findSchemaOrThrow(upsample_bicubic2d_backward_grad_input::name, upsample_bicubic2d_backward_grad_input::overload_name)
9888 .typed<upsample_bicubic2d_backward_grad_input::schema>();
9889}
9890
9891// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
9892at::Tensor & upsample_bicubic2d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
9893
9894 static auto op = create_upsample_bicubic2d_backward_grad_input_typed_handle();
9895 return op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
9896}
9897
9898// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
9899at::Tensor & upsample_bicubic2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
9900
9901 static auto op = create_upsample_bicubic2d_backward_grad_input_typed_handle();
9902 return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
9903}
9904
9905STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_backward, name, "aten::upsample_bicubic2d_backward")
9906STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_backward, overload_name, "")
9907STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_bicubic2d_backward, schema_str, "upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")
9908
9909// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9910static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d_backward::schema> create_upsample_bicubic2d_backward_typed_handle() {
9911 return c10::Dispatcher::singleton()
9912 .findSchemaOrThrow(upsample_bicubic2d_backward::name, upsample_bicubic2d_backward::overload_name)
9913 .typed<upsample_bicubic2d_backward::schema>();
9914}
9915
9916// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9917at::Tensor upsample_bicubic2d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
9918
9919 static auto op = create_upsample_bicubic2d_backward_typed_handle();
9920 return op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
9921}
9922
9923// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
9924at::Tensor upsample_bicubic2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
9925
9926 static auto op = create_upsample_bicubic2d_backward_typed_handle();
9927 return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
9928}
9929
9930STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_backward_grad_input, name, "aten::upsample_trilinear3d_backward")
9931STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_backward_grad_input, overload_name, "grad_input")
9932STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_backward_grad_input, schema_str, "upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
9933
9934// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
9935static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d_backward_grad_input::schema> create_upsample_trilinear3d_backward_grad_input_typed_handle() {
9936 return c10::Dispatcher::singleton()
9937 .findSchemaOrThrow(upsample_trilinear3d_backward_grad_input::name, upsample_trilinear3d_backward_grad_input::overload_name)
9938 .typed<upsample_trilinear3d_backward_grad_input::schema>();
9939}
9940
9941// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
9942at::Tensor & upsample_trilinear3d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
9943
9944 static auto op = create_upsample_trilinear3d_backward_grad_input_typed_handle();
9945 return op.call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
9946}
9947
9948// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
9949at::Tensor & upsample_trilinear3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
9950
9951 static auto op = create_upsample_trilinear3d_backward_grad_input_typed_handle();
9952 return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
9953}
9954
9955STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_backward, name, "aten::upsample_trilinear3d_backward")
9956STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_backward, overload_name, "")
9957STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_trilinear3d_backward, schema_str, "upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
9958
9959// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
9960static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d_backward::schema> create_upsample_trilinear3d_backward_typed_handle() {
9961 return c10::Dispatcher::singleton()
9962 .findSchemaOrThrow(upsample_trilinear3d_backward::name, upsample_trilinear3d_backward::overload_name)
9963 .typed<upsample_trilinear3d_backward::schema>();
9964}
9965
9966// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
9967at::Tensor upsample_trilinear3d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
9968
9969 static auto op = create_upsample_trilinear3d_backward_typed_handle();
9970 return op.call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
9971}
9972
9973// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
9974at::Tensor upsample_trilinear3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
9975
9976 static auto op = create_upsample_trilinear3d_backward_typed_handle();
9977 return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
9978}
9979
9980STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_out, name, "aten::upsample_nearest2d")
9981STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_out, overload_name, "out")
9982STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d_out, schema_str, "upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
9983
9984// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9985static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_out::schema> create_upsample_nearest2d_out_typed_handle() {
9986 return c10::Dispatcher::singleton()
9987 .findSchemaOrThrow(upsample_nearest2d_out::name, upsample_nearest2d_out::overload_name)
9988 .typed<upsample_nearest2d_out::schema>();
9989}
9990
9991// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9992at::Tensor & upsample_nearest2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
9993
9994 static auto op = create_upsample_nearest2d_out_typed_handle();
9995 return op.call(self, output_size, scales_h, scales_w, out);
9996}
9997
9998// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
9999at::Tensor & upsample_nearest2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
10000
10001 static auto op = create_upsample_nearest2d_out_typed_handle();
10002 return op.redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out);
10003}
10004
10005STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d, name, "aten::upsample_nearest2d")
10006STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d, overload_name, "")
10007STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest2d, schema_str, "upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor")
10008
10009// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
10010static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d::schema> create_upsample_nearest2d_typed_handle() {
10011 return c10::Dispatcher::singleton()
10012 .findSchemaOrThrow(upsample_nearest2d::name, upsample_nearest2d::overload_name)
10013 .typed<upsample_nearest2d::schema>();
10014}
10015
10016// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
10017at::Tensor upsample_nearest2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
10018
10019 static auto op = create_upsample_nearest2d_typed_handle();
10020 return op.call(self, output_size, scales_h, scales_w);
10021}
10022
10023// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
10024at::Tensor upsample_nearest2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
10025
10026 static auto op = create_upsample_nearest2d_typed_handle();
10027 return op.redispatch(dispatchKeySet, self, output_size, scales_h, scales_w);
10028}
10029
10030STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_backward_grad_input, name, "aten::upsample_nearest3d_backward")
10031STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_backward_grad_input, overload_name, "grad_input")
10032STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_backward_grad_input, schema_str, "upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
10033
10034// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10035static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d_backward_grad_input::schema> create_upsample_nearest3d_backward_grad_input_typed_handle() {
10036 return c10::Dispatcher::singleton()
10037 .findSchemaOrThrow(upsample_nearest3d_backward_grad_input::name, upsample_nearest3d_backward_grad_input::overload_name)
10038 .typed<upsample_nearest3d_backward_grad_input::schema>();
10039}
10040
10041// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10042at::Tensor & upsample_nearest3d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
10043
10044 static auto op = create_upsample_nearest3d_backward_grad_input_typed_handle();
10045 return op.call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
10046}
10047
10048// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10049at::Tensor & upsample_nearest3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
10050
10051 static auto op = create_upsample_nearest3d_backward_grad_input_typed_handle();
10052 return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
10053}
10054
10055STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact3d_backward_grad_input, name, "aten::_upsample_nearest_exact3d_backward")
10056STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact3d_backward_grad_input, overload_name, "grad_input")
10057STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact3d_backward_grad_input, schema_str, "_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
10058
10059// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10060static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d_backward_grad_input::schema> create__upsample_nearest_exact3d_backward_grad_input_typed_handle() {
10061 return c10::Dispatcher::singleton()
10062 .findSchemaOrThrow(_upsample_nearest_exact3d_backward_grad_input::name, _upsample_nearest_exact3d_backward_grad_input::overload_name)
10063 .typed<_upsample_nearest_exact3d_backward_grad_input::schema>();
10064}
10065
10066// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10067at::Tensor & _upsample_nearest_exact3d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
10068
10069 static auto op = create__upsample_nearest_exact3d_backward_grad_input_typed_handle();
10070 return op.call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
10071}
10072
10073// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10074at::Tensor & _upsample_nearest_exact3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
10075
10076 static auto op = create__upsample_nearest_exact3d_backward_grad_input_typed_handle();
10077 return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
10078}
10079
10080STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_backward, name, "aten::upsample_nearest3d_backward")
10081STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_backward, overload_name, "")
10082STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest3d_backward, schema_str, "upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
10083
10084// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
10085static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d_backward::schema> create_upsample_nearest3d_backward_typed_handle() {
10086 return c10::Dispatcher::singleton()
10087 .findSchemaOrThrow(upsample_nearest3d_backward::name, upsample_nearest3d_backward::overload_name)
10088 .typed<upsample_nearest3d_backward::schema>();
10089}
10090
10091// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
10092at::Tensor upsample_nearest3d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
10093
10094 static auto op = create_upsample_nearest3d_backward_typed_handle();
10095 return op.call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
10096}
10097
10098// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
10099at::Tensor upsample_nearest3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
10100
10101 static auto op = create_upsample_nearest3d_backward_typed_handle();
10102 return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
10103}
10104
10105STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact3d_backward, name, "aten::_upsample_nearest_exact3d_backward")
10106STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact3d_backward, overload_name, "")
10107STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact3d_backward, schema_str, "_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
10108
10109// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
10110static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d_backward::schema> create__upsample_nearest_exact3d_backward_typed_handle() {
10111 return c10::Dispatcher::singleton()
10112 .findSchemaOrThrow(_upsample_nearest_exact3d_backward::name, _upsample_nearest_exact3d_backward::overload_name)
10113 .typed<_upsample_nearest_exact3d_backward::schema>();
10114}
10115
10116// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
10117at::Tensor _upsample_nearest_exact3d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
10118
10119 static auto op = create__upsample_nearest_exact3d_backward_typed_handle();
10120 return op.call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
10121}
10122
10123// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
10124at::Tensor _upsample_nearest_exact3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
10125
10126 static auto op = create__upsample_nearest_exact3d_backward_typed_handle();
10127 return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
10128}
10129
10130STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_backward_grad_input, name, "aten::logit_backward")
10131STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_backward_grad_input, overload_name, "grad_input")
10132STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_backward_grad_input, schema_str, "logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
10133
10134// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10135static C10_NOINLINE c10::TypedOperatorHandle<logit_backward_grad_input::schema> create_logit_backward_grad_input_typed_handle() {
10136 return c10::Dispatcher::singleton()
10137 .findSchemaOrThrow(logit_backward_grad_input::name, logit_backward_grad_input::overload_name)
10138 .typed<logit_backward_grad_input::schema>();
10139}
10140
10141// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10142at::Tensor & logit_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input) {
10143
10144 static auto op = create_logit_backward_grad_input_typed_handle();
10145 return op.call(grad_output, self, eps, grad_input);
10146}
10147
10148// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
10149at::Tensor & logit_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input) {
10150
10151 static auto op = create_logit_backward_grad_input_typed_handle();
10152 return op.redispatch(dispatchKeySet, grad_output, self, eps, grad_input);
10153}
10154
10155STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_backward, name, "aten::logit_backward")
10156STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_backward, overload_name, "")
10157STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logit_backward, schema_str, "logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor")
10158
10159// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
10160static C10_NOINLINE c10::TypedOperatorHandle<logit_backward::schema> create_logit_backward_typed_handle() {
10161 return c10::Dispatcher::singleton()
10162 .findSchemaOrThrow(logit_backward::name, logit_backward::overload_name)
10163 .typed<logit_backward::schema>();
10164}
10165
10166// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
10167at::Tensor logit_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
10168
10169 static auto op = create_logit_backward_typed_handle();
10170 return op.call(grad_output, self, eps);
10171}
10172
10173// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
10174at::Tensor logit_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
10175
10176 static auto op = create_logit_backward_typed_handle();
10177 return op.redispatch(dispatchKeySet, grad_output, self, eps);
10178}
10179
10180STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose2d_out, name, "aten::slow_conv_transpose2d")
10181STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose2d_out, overload_name, "out")
10182STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose2d_out, schema_str, "slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)")
10183
10184// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
10185static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_transpose2d_out::schema> create_slow_conv_transpose2d_out_typed_handle() {
10186 return c10::Dispatcher::singleton()
10187 .findSchemaOrThrow(slow_conv_transpose2d_out::name, slow_conv_transpose2d_out::overload_name)
10188 .typed<slow_conv_transpose2d_out::schema>();
10189}
10190
10191// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
10192at::Tensor & slow_conv_transpose2d_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
10193
10194 static auto op = create_slow_conv_transpose2d_out_typed_handle();
10195 return op.call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
10196}
10197
10198// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
10199at::Tensor & slow_conv_transpose2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
10200
10201 static auto op = create_slow_conv_transpose2d_out_typed_handle();
10202 return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
10203}
10204
10205STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose2d, name, "aten::slow_conv_transpose2d")
10206STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose2d, overload_name, "")
10207STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_transpose2d, schema_str, "slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor")
10208
10209// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
10210static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_transpose2d::schema> create_slow_conv_transpose2d_typed_handle() {
10211 return c10::Dispatcher::singleton()
10212 .findSchemaOrThrow(slow_conv_transpose2d::name, slow_conv_transpose2d::overload_name)
10213 .typed<slow_conv_transpose2d::schema>();
10214}
10215
10216// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
10217at::Tensor slow_conv_transpose2d::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
10218
10219 static auto op = create_slow_conv_transpose2d_typed_handle();
10220 return op.call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
10221}
10222
10223// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
10224at::Tensor slow_conv_transpose2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
10225
10226 static auto op = create_slow_conv_transpose2d_typed_handle();
10227 return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
10228}
10229
10230STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_grad_input, name, "aten::_slow_conv2d_backward")
10231STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_grad_input, overload_name, "grad_input")
10232STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_grad_input, schema_str, "_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
10233
10234// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
10235static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_backward_grad_input::schema> create__slow_conv2d_backward_grad_input_typed_handle() {
10236 return c10::Dispatcher::singleton()
10237 .findSchemaOrThrow(_slow_conv2d_backward_grad_input::name, _slow_conv2d_backward_grad_input::overload_name)
10238 .typed<_slow_conv2d_backward_grad_input::schema>();
10239}
10240
10241// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
10242::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
10243
10244 static auto op = create__slow_conv2d_backward_grad_input_typed_handle();
10245 return op.call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
10246}
10247
10248// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
10249::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
10250
10251 static auto op = create__slow_conv2d_backward_grad_input_typed_handle();
10252 return op.redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
10253}
10254
10255STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_output_mask, name, "aten::_slow_conv2d_backward")
10256STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_output_mask, overload_name, "output_mask")
10257STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_output_mask, schema_str, "_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)")
10258
10259// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
10260static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_backward_output_mask::schema> create__slow_conv2d_backward_output_mask_typed_handle() {
10261 return c10::Dispatcher::singleton()
10262 .findSchemaOrThrow(_slow_conv2d_backward_output_mask::name, _slow_conv2d_backward_output_mask::overload_name)
10263 .typed<_slow_conv2d_backward_output_mask::schema>();
10264}
10265
10266// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
10267::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
10268
10269 static auto op = create__slow_conv2d_backward_output_mask_typed_handle();
10270 return op.call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
10271}
10272
10273// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
10274::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
10275
10276 static auto op = create__slow_conv2d_backward_output_mask_typed_handle();
10277 return op.redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask);
10278}
10279
10280STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_depthwise3d, name, "aten::conv_depthwise3d")
10281STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_depthwise3d, overload_name, "")
10282STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_depthwise3d, schema_str, "conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor")
10283
10284// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
10285static C10_NOINLINE c10::TypedOperatorHandle<conv_depthwise3d::schema> create_conv_depthwise3d_typed_handle() {
10286 return c10::Dispatcher::singleton()
10287 .findSchemaOrThrow(conv_depthwise3d::name, conv_depthwise3d::overload_name)
10288 .typed<conv_depthwise3d::schema>();
10289}
10290
10291// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
10292at::Tensor conv_depthwise3d::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
10293
10294 static auto op = create_conv_depthwise3d_typed_handle();
10295 return op.call(self, weight, kernel_size, bias, stride, padding, dilation);
10296}
10297
10298// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
10299at::Tensor conv_depthwise3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
10300
10301 static auto op = create_conv_depthwise3d_typed_handle();
10302 return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
10303}
10304
10305STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_dilated2d, name, "aten::slow_conv_dilated2d")
10306STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_dilated2d, overload_name, "")
10307STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_dilated2d, schema_str, "slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor")
10308
10309// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
10310static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_dilated2d::schema> create_slow_conv_dilated2d_typed_handle() {
10311 return c10::Dispatcher::singleton()
10312 .findSchemaOrThrow(slow_conv_dilated2d::name, slow_conv_dilated2d::overload_name)
10313 .typed<slow_conv_dilated2d::schema>();
10314}
10315
10316// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
10317at::Tensor slow_conv_dilated2d::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
10318
10319 static auto op = create_slow_conv_dilated2d_typed_handle();
10320 return op.call(self, weight, kernel_size, bias, stride, padding, dilation);
10321}
10322
10323// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
10324at::Tensor slow_conv_dilated2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
10325
10326 static auto op = create_slow_conv_dilated2d_typed_handle();
10327 return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
10328}
10329
10330STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col2im_out, name, "aten::col2im")
10331STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col2im_out, overload_name, "out")
10332STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col2im_out, schema_str, "col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)")
10333
10334// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
10335static C10_NOINLINE c10::TypedOperatorHandle<col2im_out::schema> create_col2im_out_typed_handle() {
10336 return c10::Dispatcher::singleton()
10337 .findSchemaOrThrow(col2im_out::name, col2im_out::overload_name)
10338 .typed<col2im_out::schema>();
10339}
10340
10341// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
10342at::Tensor & col2im_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
10343
10344 static auto op = create_col2im_out_typed_handle();
10345 return op.call(self, output_size, kernel_size, dilation, padding, stride, out);
10346}
10347
10348// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
10349at::Tensor & col2im_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
10350
10351 static auto op = create_col2im_out_typed_handle();
10352 return op.redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride, out);
10353}
10354
10355STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col2im, name, "aten::col2im")
10356STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col2im, overload_name, "")
10357STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(col2im, schema_str, "col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor")
10358
10359// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
10360static C10_NOINLINE c10::TypedOperatorHandle<col2im::schema> create_col2im_typed_handle() {
10361 return c10::Dispatcher::singleton()
10362 .findSchemaOrThrow(col2im::name, col2im::overload_name)
10363 .typed<col2im::schema>();
10364}
10365
10366// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
10367at::Tensor col2im::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
10368
10369 static auto op = create_col2im_typed_handle();
10370 return op.call(self, output_size, kernel_size, dilation, padding, stride);
10371}
10372
10373// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
10374at::Tensor col2im::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
10375
10376 static auto op = create_col2im_typed_handle();
10377 return op.redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride);
10378}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isfinite, name, "aten::isfinite")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isfinite, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isfinite, schema_str, "isfinite(Tensor self) -> Tensor")

// aten::isfinite(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isfinite::schema> create_isfinite_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isfinite::name, isfinite::overload_name)
      .typed<isfinite::schema>();
}

// aten::isfinite(Tensor self) -> Tensor
at::Tensor isfinite::call(const at::Tensor & self) {

    static auto op = create_isfinite_typed_handle();
    return op.call(self);
}

// aten::isfinite(Tensor self) -> Tensor
at::Tensor isfinite::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_isfinite_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(record_stream, name, "aten::record_stream")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(record_stream, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(record_stream, schema_str, "record_stream(Tensor(a!) self, Stream s) -> ()")

// aten::record_stream(Tensor(a!) self, Stream s) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<record_stream::schema> create_record_stream_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(record_stream::name, record_stream::overload_name)
      .typed<record_stream::schema>();
}

// aten::record_stream(Tensor(a!) self, Stream s) -> ()
void record_stream::call(at::Tensor & self, at::Stream s) {

    static auto op = create_record_stream_typed_handle();
    return op.call(self, s);
}

// aten::record_stream(Tensor(a!) self, Stream s) -> ()
void record_stream::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Stream s) {

    static auto op = create_record_stream_typed_handle();
    return op.redispatch(dispatchKeySet, self, s);
}
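
// NOTE [Example: void-returning mutating ops] -- illustrative only, and
// assumes a CUDA build. The schema return `()` makes both call and
// redispatch `void`; the op merely tags `self` so the CUDA caching
// allocator keeps its storage alive until work queued on `s` completes:
//
//   at::Tensor t = at::empty({8}, at::kCUDA);
//   c10::cuda::CUDAStream side = c10::cuda::getStreamFromPool();
//   t.record_stream(side.unwrap());   // routes through record_stream::call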

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isposinf, name, "aten::isposinf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isposinf, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isposinf, schema_str, "isposinf(Tensor self) -> Tensor")

// aten::isposinf(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isposinf::schema> create_isposinf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isposinf::name, isposinf::overload_name)
      .typed<isposinf::schema>();
}

// aten::isposinf(Tensor self) -> Tensor
at::Tensor isposinf::call(const at::Tensor & self) {

    static auto op = create_isposinf_typed_handle();
    return op.call(self);
}

// aten::isposinf(Tensor self) -> Tensor
at::Tensor isposinf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_isposinf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isposinf_out, name, "aten::isposinf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isposinf_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isposinf_out, schema_str, "isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<isposinf_out::schema> create_isposinf_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isposinf_out::name, isposinf_out::overload_name)
      .typed<isposinf_out::schema>();
}

// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isposinf_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_isposinf_out_typed_handle();
    return op.call(self, out);
}

// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isposinf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_isposinf_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}
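
// NOTE [Example: out= variants] -- illustrative sketch. Each `<op>_out`
// struct pairs with its functional op through the same name string plus a
// distinct overload_name ("out"), threading a Tensor(a!) destination as the
// trailing argument and returning it by reference. With a preallocated
// result buffer:
//
//   at::Tensor x = at::full({2}, std::numeric_limits<float>::infinity());
//   at::Tensor out = at::empty({2}, at::kBool);
//   at::_ops::isposinf_out::call(x, out);   // out now holds {true, true}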

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expm1, name, "aten::special_expm1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expm1, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expm1, schema_str, "special_expm1(Tensor self) -> Tensor")

// aten::special_expm1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_expm1::schema> create_special_expm1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_expm1::name, special_expm1::overload_name)
      .typed<special_expm1::schema>();
}

// aten::special_expm1(Tensor self) -> Tensor
at::Tensor special_expm1::call(const at::Tensor & self) {

    static auto op = create_special_expm1_typed_handle();
    return op.call(self);
}

// aten::special_expm1(Tensor self) -> Tensor
at::Tensor special_expm1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_expm1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expm1_out, name, "aten::special_expm1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expm1_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expm1_out, schema_str, "special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_expm1_out::schema> create_special_expm1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_expm1_out::name, special_expm1_out::overload_name)
      .typed<special_expm1_out::schema>();
}

// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_expm1_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_expm1_out_typed_handle();
    return op.call(self, out);
}

// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_expm1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_expm1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_exp2, name, "aten::special_exp2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_exp2, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_exp2, schema_str, "special_exp2(Tensor self) -> Tensor")

// aten::special_exp2(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_exp2::schema> create_special_exp2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_exp2::name, special_exp2::overload_name)
      .typed<special_exp2::schema>();
}

// aten::special_exp2(Tensor self) -> Tensor
at::Tensor special_exp2::call(const at::Tensor & self) {

    static auto op = create_special_exp2_typed_handle();
    return op.call(self);
}

// aten::special_exp2(Tensor self) -> Tensor
at::Tensor special_exp2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_exp2_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_exp2_out, name, "aten::special_exp2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_exp2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_exp2_out, schema_str, "special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_exp2_out::schema> create_special_exp2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_exp2_out::name, special_exp2_out::overload_name)
      .typed<special_exp2_out::schema>();
}

// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_exp2_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_exp2_out_typed_handle();
    return op.call(self, out);
}

// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_exp2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_exp2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaln, name, "aten::special_gammaln")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaln, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaln, schema_str, "special_gammaln(Tensor self) -> Tensor")

// aten::special_gammaln(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_gammaln::schema> create_special_gammaln_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammaln::name, special_gammaln::overload_name)
      .typed<special_gammaln::schema>();
}

// aten::special_gammaln(Tensor self) -> Tensor
at::Tensor special_gammaln::call(const at::Tensor & self) {

    static auto op = create_special_gammaln_typed_handle();
    return op.call(self);
}

// aten::special_gammaln(Tensor self) -> Tensor
at::Tensor special_gammaln::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_gammaln_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaln_out, name, "aten::special_gammaln")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaln_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_gammaln_out, schema_str, "special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_gammaln_out::schema> create_special_gammaln_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammaln_out::name, special_gammaln_out::overload_name)
      .typed<special_gammaln_out::schema>();
}

// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammaln_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_gammaln_out_typed_handle();
    return op.call(self, out);
}

// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammaln_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_gammaln_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfinv, name, "aten::special_erfinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfinv, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfinv, schema_str, "special_erfinv(Tensor self) -> Tensor")

// aten::special_erfinv(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_erfinv::schema> create_special_erfinv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfinv::name, special_erfinv::overload_name)
      .typed<special_erfinv::schema>();
}

// aten::special_erfinv(Tensor self) -> Tensor
at::Tensor special_erfinv::call(const at::Tensor & self) {

    static auto op = create_special_erfinv_typed_handle();
    return op.call(self);
}

// aten::special_erfinv(Tensor self) -> Tensor
at::Tensor special_erfinv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_erfinv_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfinv_out, name, "aten::special_erfinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfinv_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erfinv_out, schema_str, "special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_erfinv_out::schema> create_special_erfinv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfinv_out::name, special_erfinv_out::overload_name)
      .typed<special_erfinv_out::schema>();
}

// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfinv_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_erfinv_out_typed_handle();
    return op.call(self, out);
}

// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfinv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_erfinv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py, name, "aten::special_xlog1py")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py, schema_str, "special_xlog1py(Tensor self, Tensor other) -> Tensor")

// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py::schema> create_special_xlog1py_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py::name, special_xlog1py::overload_name)
      .typed<special_xlog1py::schema>();
}

// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor
at::Tensor special_xlog1py::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_special_xlog1py_typed_handle();
    return op.call(self, other);
}

// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor
at::Tensor special_xlog1py::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_special_xlog1py_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_self_scalar, name, "aten::special_xlog1py")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_self_scalar, overload_name, "self_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_self_scalar, schema_str, "special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor")

// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_self_scalar::schema> create_special_xlog1py_self_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_self_scalar::name, special_xlog1py_self_scalar::overload_name)
      .typed<special_xlog1py_self_scalar::schema>();
}

// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_xlog1py_self_scalar::call(const at::Scalar & self, const at::Tensor & other) {

    static auto op = create_special_xlog1py_self_scalar_typed_handle();
    return op.call(self, other);
}

// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_xlog1py_self_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {

    static auto op = create_special_xlog1py_self_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_other_scalar, name, "aten::special_xlog1py")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_other_scalar, overload_name, "other_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_other_scalar, schema_str, "special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor")

// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_other_scalar::schema> create_special_xlog1py_other_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_other_scalar::name, special_xlog1py_other_scalar::overload_name)
      .typed<special_xlog1py_other_scalar::schema>();
}

// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_xlog1py_other_scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_special_xlog1py_other_scalar_typed_handle();
    return op.call(self, other);
}

// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_xlog1py_other_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_special_xlog1py_other_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_out, name, "aten::special_xlog1py")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_out, schema_str, "special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_out::schema> create_special_xlog1py_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_out::name, special_xlog1py_out::overload_name)
      .typed<special_xlog1py_out::schema>();
}

// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_xlog1py_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_xlog1py_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_self_scalar_out, name, "aten::special_xlog1py")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_self_scalar_out, overload_name, "self_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_self_scalar_out, schema_str, "special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_self_scalar_out::schema> create_special_xlog1py_self_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_self_scalar_out::name, special_xlog1py_self_scalar_out::overload_name)
      .typed<special_xlog1py_self_scalar_out::schema>();
}

// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_self_scalar_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_xlog1py_self_scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_self_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_xlog1py_self_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_other_scalar_out, name, "aten::special_xlog1py")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_other_scalar_out, overload_name, "other_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlog1py_other_scalar_out, schema_str, "special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_other_scalar_out::schema> create_special_xlog1py_other_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_other_scalar_out::name, special_xlog1py_other_scalar_out::overload_name)
      .typed<special_xlog1py_other_scalar_out::schema>();
}

// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_other_scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_special_xlog1py_other_scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_other_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_special_xlog1py_other_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}
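
// NOTE [Example: overload_name disambiguation] -- illustrative sketch. All
// six special_xlog1py structs above share the name "aten::special_xlog1py";
// the overload_name alone selects the schema, which is why findSchemaOrThrow
// takes both strings. Resolving the other_scalar overload by hand:
//
//   auto op = c10::Dispatcher::singleton()
//       .findSchemaOrThrow("aten::special_xlog1py", "other_scalar")
//       .typed<at::Tensor (const at::Tensor &, const at::Scalar &)>();
//   at::Tensor y = op.call(at::rand({4}), 2.0);   // elementwise x * log1p(2)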

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0, name, "aten::special_i0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0, schema_str, "special_i0(Tensor self) -> Tensor")

// aten::special_i0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_i0::schema> create_special_i0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_i0::name, special_i0::overload_name)
      .typed<special_i0::schema>();
}

// aten::special_i0(Tensor self) -> Tensor
at::Tensor special_i0::call(const at::Tensor & self) {

    static auto op = create_special_i0_typed_handle();
    return op.call(self);
}

// aten::special_i0(Tensor self) -> Tensor
at::Tensor special_i0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_i0_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0_out, name, "aten::special_i0")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_i0_out, schema_str, "special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_i0_out::schema> create_special_i0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_i0_out::name, special_i0_out::overload_name)
      .typed<special_i0_out::schema>();
}

// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i0_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_i0_out_typed_handle();
    return op.call(self, out);
}

// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_i0_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_polygamma, name, "aten::special_polygamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_polygamma, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_polygamma, schema_str, "special_polygamma(int n, Tensor self) -> Tensor")

// aten::special_polygamma(int n, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_polygamma::schema> create_special_polygamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_polygamma::name, special_polygamma::overload_name)
      .typed<special_polygamma::schema>();
}

// aten::special_polygamma(int n, Tensor self) -> Tensor
at::Tensor special_polygamma::call(int64_t n, const at::Tensor & self) {

    static auto op = create_special_polygamma_typed_handle();
    return op.call(n, self);
}

// aten::special_polygamma(int n, Tensor self) -> Tensor
at::Tensor special_polygamma::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self) {

    static auto op = create_special_polygamma_typed_handle();
    return op.redispatch(dispatchKeySet, n, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_polygamma_out, name, "aten::special_polygamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_polygamma_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_polygamma_out, schema_str, "special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_polygamma_out::schema> create_special_polygamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_polygamma_out::name, special_polygamma_out::overload_name)
      .typed<special_polygamma_out::schema>();
}

// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_polygamma_out::call(int64_t n, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_polygamma_out_typed_handle();
    return op.call(n, self, out);
}

// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_polygamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_polygamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log1p, name, "aten::special_log1p")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log1p, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log1p, schema_str, "special_log1p(Tensor self) -> Tensor")

// aten::special_log1p(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_log1p::schema> create_special_log1p_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_log1p::name, special_log1p::overload_name)
      .typed<special_log1p::schema>();
}

// aten::special_log1p(Tensor self) -> Tensor
at::Tensor special_log1p::call(const at::Tensor & self) {

    static auto op = create_special_log1p_typed_handle();
    return op.call(self);
}

// aten::special_log1p(Tensor self) -> Tensor
at::Tensor special_log1p::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_log1p_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log1p_out, name, "aten::special_log1p")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log1p_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log1p_out, schema_str, "special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_log1p_out::schema> create_special_log1p_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_log1p_out::name, special_log1p_out::overload_name)
      .typed<special_log1p_out::schema>();
}

// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_log1p_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_log1p_out_typed_handle();
    return op.call(self, out);
}

// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_log1p_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_log1p_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft, name, "aten::fft_irfft")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft, schema_str, "fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor")

// aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfft::schema> create_fft_irfft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfft::name, fft_irfft::overload_name)
      .typed<fft_irfft::schema>();
}

// aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_irfft::call(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_irfft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_irfft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_irfft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft_out, name, "aten::fft_irfft")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft_out, schema_str, "fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfft_out::schema> create_fft_irfft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfft_out::name, fft_irfft_out::overload_name)
      .typed<fft_irfft_out::schema>();
}

// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfft_out::call(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_irfft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_irfft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}
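
// NOTE [Example: optional arguments] -- illustrative sketch. Schema types
// `int?` and `str?` surface as c10::optional parameters at this layer, so
// the Python defaults n=None / norm=None are spelled c10::nullopt:
//
//   at::Tensor freq = at::fft_rfft(at::randn({8}));
//   at::Tensor sig = at::_ops::fft_irfft::call(
//       freq, /*n=*/c10::nullopt, /*dim=*/-1, /*norm=*/c10::nullopt);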

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifft2, name, "aten::fft_ifft2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifft2, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifft2, schema_str, "fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor")

// aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifft2::schema> create_fft_ifft2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifft2::name, fft_ifft2::overload_name)
      .typed<fft_ifft2::schema>();
}

// aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_ifft2::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_ifft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_ifft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_ifft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifft2_out, name, "aten::fft_ifft2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifft2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifft2_out, schema_str, "fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifft2_out::schema> create_fft_ifft2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifft2_out::name, fft_ifft2_out::overload_name)
      .typed<fft_ifft2_out::schema>();
}

// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifft2_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_ifft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_ifft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft2, name, "aten::fft_irfft2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft2, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft2, schema_str, "fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor")

// aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfft2::schema> create_fft_irfft2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfft2::name, fft_irfft2::overload_name)
      .typed<fft_irfft2::schema>();
}

// aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_irfft2::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_irfft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_irfft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_irfft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft2_out, name, "aten::fft_irfft2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_irfft2_out, schema_str, "fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfft2_out::schema> create_fft_irfft2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfft2_out::name, fft_irfft2_out::overload_name)
      .typed<fft_irfft2_out::schema>();
}

// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfft2_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_irfft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_irfft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftn, name, "aten::fft_rfftn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftn, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftn, schema_str, "fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor")

// aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfftn::schema> create_fft_rfftn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfftn::name, fft_rfftn::overload_name)
      .typed<fft_rfftn::schema>();
}

// aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_rfftn::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_rfftn_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_rfftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_rfftn_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftn_out, name, "aten::fft_rfftn")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftn_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftn_out, schema_str, "fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfftn_out::schema> create_fft_rfftn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfftn_out::name, fft_rfftn_out::overload_name)
      .typed<fft_rfftn_out::schema>();
}

// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfftn_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_rfftn_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_rfftn_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky, name, "aten::linalg_cholesky")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky, schema_str, "linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor")

// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cholesky::schema> create_linalg_cholesky_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cholesky::name, linalg_cholesky::overload_name)
      .typed<linalg_cholesky::schema>();
}

// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
at::Tensor linalg_cholesky::call(const at::Tensor & self, bool upper) {

    static auto op = create_linalg_cholesky_typed_handle();
    return op.call(self, upper);
}

// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
at::Tensor linalg_cholesky::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper) {

    static auto op = create_linalg_cholesky_typed_handle();
    return op.redispatch(dispatchKeySet, self, upper);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_out, name, "aten::linalg_cholesky")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_out, schema_str, "linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cholesky_out::schema> create_linalg_cholesky_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cholesky_out::name, linalg_cholesky_out::overload_name)
      .typed<linalg_cholesky_out::schema>();
}

// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cholesky_out::call(const at::Tensor & self, bool upper, at::Tensor & out) {

    static auto op = create_linalg_cholesky_out_typed_handle();
    return op.call(self, upper, out);
}

// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cholesky_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {

    static auto op = create_linalg_cholesky_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, upper, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_det, name, "aten::_linalg_det")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_det, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_det, schema_str, "_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)")

// aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_det::schema> create__linalg_det_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_det::name, _linalg_det::overload_name)
      .typed<_linalg_det::schema>();
}

// aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det::call(const at::Tensor & A) {

    static auto op = create__linalg_det_typed_handle();
    return op.call(A);
}

// aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {

    static auto op = create__linalg_det_typed_handle();
    return op.redispatch(dispatchKeySet, A);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_det_result, name, "aten::_linalg_det")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_det_result, overload_name, "result")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_linalg_det_result, schema_str, "_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)")

// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_det_result::schema> create__linalg_det_result_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_det_result::name, _linalg_det_result::overload_name)
      .typed<_linalg_det_result::schema>();
}

// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_result::call(const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {

    static auto op = create__linalg_det_result_typed_handle();
    return op.call(A, result, LU, pivots);
}

// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_result::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {

    static auto op = create__linalg_det_result_typed_handle();
    return op.redispatch(dispatchKeySet, A, result, LU, pivots);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor, name, "aten::linalg_ldl_factor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor, schema_str, "linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)")

// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_factor::schema> create_linalg_ldl_factor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_factor::name, linalg_ldl_factor::overload_name)
      .typed<linalg_ldl_factor::schema>();
}

// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor::call(const at::Tensor & self, bool hermitian) {

    static auto op = create_linalg_ldl_factor_typed_handle();
    return op.call(self, hermitian);
}

// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian) {

    static auto op = create_linalg_ldl_factor_typed_handle();
    return op.redispatch(dispatchKeySet, self, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_out, name, "aten::linalg_ldl_factor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_ldl_factor_out, schema_str, "linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)")

// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_factor_out::schema> create_linalg_ldl_factor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_factor_out::name, linalg_ldl_factor_out::overload_name)
      .typed<linalg_ldl_factor_out::schema>();
}

// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out::call(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {

    static auto op = create_linalg_ldl_factor_out_typed_handle();
    return op.call(self, hermitian, LD, pivots);
}

// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {

    static auto op = create_linalg_ldl_factor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, hermitian, LD, pivots);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matmul, name, "aten::linalg_matmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matmul, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matmul, schema_str, "linalg_matmul(Tensor self, Tensor other) -> Tensor")

// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matmul::schema> create_linalg_matmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matmul::name, linalg_matmul::overload_name)
      .typed<linalg_matmul::schema>();
}

// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
at::Tensor linalg_matmul::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_linalg_matmul_typed_handle();
    return op.call(self, other);
}

// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
at::Tensor linalg_matmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_linalg_matmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matmul_out, name, "aten::linalg_matmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matmul_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matmul_out, schema_str, "linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matmul_out::schema> create_linalg_matmul_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matmul_out::name, linalg_matmul_out::overload_name)
      .typed<linalg_matmul_out::schema>();
}

// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matmul_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_linalg_matmul_out_typed_handle();
    return op.call(self, other, out);
}

// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_linalg_matmul_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_slogdet, name, "aten::linalg_slogdet")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_slogdet, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_slogdet, schema_str, "linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)")

// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_slogdet::schema> create_linalg_slogdet_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_slogdet::name, linalg_slogdet::overload_name)
      .typed<linalg_slogdet::schema>();
}

// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet::call(const at::Tensor & A) {

    static auto op = create_linalg_slogdet_typed_handle();
    return op.call(A);
}

// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {

    static auto op = create_linalg_slogdet_typed_handle();
    return op.redispatch(dispatchKeySet, A);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_slogdet_out, name, "aten::linalg_slogdet")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_slogdet_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_slogdet_out, schema_str, "linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)")

// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_slogdet_out::schema> create_linalg_slogdet_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_slogdet_out::name, linalg_slogdet_out::overload_name)
      .typed<linalg_slogdet_out::schema>();
}

// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out::call(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {

    static auto op = create_linalg_slogdet_out_typed_handle();
    return op.call(A, sign, logabsdet);
}

// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {

    static auto op = create_linalg_slogdet_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, sign, logabsdet);
}
11429
11430STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logdet, name, "aten::logdet")
11431STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logdet, overload_name, "")
11432STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logdet, schema_str, "logdet(Tensor self) -> Tensor")
11433
11434// aten::logdet(Tensor self) -> Tensor
11435static C10_NOINLINE c10::TypedOperatorHandle<logdet::schema> create_logdet_typed_handle() {
11436 return c10::Dispatcher::singleton()
11437 .findSchemaOrThrow(logdet::name, logdet::overload_name)
11438 .typed<logdet::schema>();
11439}
11440
11441// aten::logdet(Tensor self) -> Tensor
11442at::Tensor logdet::call(const at::Tensor & self) {
11443
11444 static auto op = create_logdet_typed_handle();
11445 return op.call(self);
11446}
11447
11448// aten::logdet(Tensor self) -> Tensor
11449at::Tensor logdet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
11450
11451 static auto op = create_logdet_typed_handle();
11452 return op.redispatch(dispatchKeySet, self);
11453}
11454
11455STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_eigvals, name, "aten::linalg_eigvals")
11456STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_eigvals, overload_name, "")
11457STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_eigvals, schema_str, "linalg_eigvals(Tensor self) -> Tensor")
11458
11459// aten::linalg_eigvals(Tensor self) -> Tensor
11460static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigvals::schema> create_linalg_eigvals_typed_handle() {
11461 return c10::Dispatcher::singleton()
11462 .findSchemaOrThrow(linalg_eigvals::name, linalg_eigvals::overload_name)
11463 .typed<linalg_eigvals::schema>();
11464}
11465
11466// aten::linalg_eigvals(Tensor self) -> Tensor
11467at::Tensor linalg_eigvals::call(const at::Tensor & self) {
11468
11469 static auto op = create_linalg_eigvals_typed_handle();
11470 return op.call(self);
11471}
11472
11473// aten::linalg_eigvals(Tensor self) -> Tensor
11474at::Tensor linalg_eigvals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
11475
11476 static auto op = create_linalg_eigvals_typed_handle();
11477 return op.redispatch(dispatchKeySet, self);
11478}
11479
11480STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_eigvals_out, name, "aten::linalg_eigvals")
11481STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_eigvals_out, overload_name, "out")
11482STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_eigvals_out, schema_str, "linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
11483
11484// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11485static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigvals_out::schema> create_linalg_eigvals_out_typed_handle() {
11486 return c10::Dispatcher::singleton()
11487 .findSchemaOrThrow(linalg_eigvals_out::name, linalg_eigvals_out::overload_name)
11488 .typed<linalg_eigvals_out::schema>();
11489}
11490
11491// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11492at::Tensor & linalg_eigvals_out::call(const at::Tensor & self, at::Tensor & out) {
11493
11494 static auto op = create_linalg_eigvals_out_typed_handle();
11495 return op.call(self, out);
11496}
11497
11498// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11499at::Tensor & linalg_eigvals_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
11500
11501 static auto op = create_linalg_eigvals_out_typed_handle();
11502 return op.redispatch(dispatchKeySet, self, out);
11503}
11504
11505STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_ex, name, "aten::linalg_inv_ex")
11506STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_ex, overload_name, "")
11507STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_ex, schema_str, "linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)")
11508
11509// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
11510static C10_NOINLINE c10::TypedOperatorHandle<linalg_inv_ex::schema> create_linalg_inv_ex_typed_handle() {
11511 return c10::Dispatcher::singleton()
11512 .findSchemaOrThrow(linalg_inv_ex::name, linalg_inv_ex::overload_name)
11513 .typed<linalg_inv_ex::schema>();
11514}
11515
11516// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
11517::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex::call(const at::Tensor & A, bool check_errors) {
11518
11519 static auto op = create_linalg_inv_ex_typed_handle();
11520 return op.call(A, check_errors);
11521}
11522
11523// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
11524::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors) {
11525
11526 static auto op = create_linalg_inv_ex_typed_handle();
11527 return op.redispatch(dispatchKeySet, A, check_errors);
11528}
11529
11530STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_ex_inverse, name, "aten::linalg_inv_ex")
11531STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_ex_inverse, overload_name, "inverse")
11532STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_inv_ex_inverse, schema_str, "linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)")
11533
11534// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
11535static C10_NOINLINE c10::TypedOperatorHandle<linalg_inv_ex_inverse::schema> create_linalg_inv_ex_inverse_typed_handle() {
11536 return c10::Dispatcher::singleton()
11537 .findSchemaOrThrow(linalg_inv_ex_inverse::name, linalg_inv_ex_inverse::overload_name)
11538 .typed<linalg_inv_ex_inverse::schema>();
11539}
11540
11541// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
11542::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_inverse::call(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
11543
11544 static auto op = create_linalg_inv_ex_inverse_typed_handle();
11545 return op.call(A, check_errors, inverse, info);
11546}
11547
11548// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
11549::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_inverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
11550
11551 static auto op = create_linalg_inv_ex_inverse_typed_handle();
11552 return op.redispatch(dispatchKeySet, A, check_errors, inverse, info);
11553}
11554
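// [Editor's illustrative sketch; not part of the generated output.] The _ex
// variants such as linalg_inv_ex report failures through the extra `info`
// tensor rather than by raising, unless check_errors=True asks the kernel to
// validate `info` itself. The function name below is hypothetical; the call
// goes through the generated entry point defined just above.
C10_UNUSED static at::Tensor example_inv_ex_sketch(const at::Tensor & A) {
  // info is 0 on success; a nonzero value is a LAPACK-style error code for
  // the corresponding batch element, and the inverse is then unspecified.
  auto result = linalg_inv_ex::call(A, /*check_errors=*/false);
  return ::std::get<0>(result);
}
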
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inner, name, "aten::inner")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inner, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inner, schema_str, "inner(Tensor self, Tensor other) -> Tensor")

// aten::inner(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<inner::schema> create_inner_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(inner::name, inner::overload_name)
      .typed<inner::schema>();
}

// aten::inner(Tensor self, Tensor other) -> Tensor
at::Tensor inner::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_inner_typed_handle();
    return op.call(self, other);
}

// aten::inner(Tensor self, Tensor other) -> Tensor
at::Tensor inner::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_inner_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inner_out, name, "aten::inner")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inner_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inner_out, schema_str, "inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<inner_out::schema> create_inner_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(inner_out::name, inner_out::overload_name)
      .typed<inner_out::schema>();
}

// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & inner_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_inner_out_typed_handle();
    return op.call(self, other, out);
}

// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & inner_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_inner_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vector_norm, name, "aten::linalg_vector_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vector_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vector_norm, schema_str, "linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")

// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vector_norm::schema> create_linalg_vector_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vector_norm::name, linalg_vector_norm::overload_name)
      .typed<linalg_vector_norm::schema>();
}

// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_vector_norm::call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_linalg_vector_norm_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_vector_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_linalg_vector_norm_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vector_norm_out, name, "aten::linalg_vector_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vector_norm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vector_norm_out, schema_str, "linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vector_norm_out::schema> create_linalg_vector_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vector_norm_out::name, linalg_vector_norm_out::overload_name)
      .typed<linalg_vector_norm_out::schema>();
}

// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_vector_norm_out::call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_linalg_vector_norm_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_vector_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create_linalg_vector_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

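// [Editor's illustrative sketch; not part of the generated output.] Optional
// schema arguments map onto concrete C++ types: `int[1]? dim` becomes
// at::OptionalIntArrayRef and `ScalarType? dtype` becomes
// c10::optional<at::ScalarType>, both of which accept c10::nullopt for the
// schema's None default. The function name below is hypothetical:
C10_UNUSED static at::Tensor example_vector_norm_sketch(const at::Tensor & self) {
  // 2-norm over dim 0, keeping the reduced dimension, with the default dtype.
  int64_t dims[] = {0};
  return linalg_vector_norm::call(self, /*ord=*/2,
                                  at::OptionalIntArrayRef(at::IntArrayRef(dims)),
                                  /*keepdim=*/true, /*dtype=*/c10::nullopt);
}
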
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve, name, "aten::linalg_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve, schema_str, "linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor")

// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve::schema> create_linalg_solve_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_solve::name, linalg_solve::overload_name)
      .typed<linalg_solve::schema>();
}

// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
at::Tensor linalg_solve::call(const at::Tensor & A, const at::Tensor & B, bool left) {

    static auto op = create_linalg_solve_typed_handle();
    return op.call(A, B, left);
}

// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
at::Tensor linalg_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left) {

    static auto op = create_linalg_solve_typed_handle();
    return op.redispatch(dispatchKeySet, A, B, left);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_out, name, "aten::linalg_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_out, schema_str, "linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_out::schema> create_linalg_solve_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_solve_out::name, linalg_solve_out::overload_name)
      .typed<linalg_solve_out::schema>();
}

// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_solve_out::call(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {

    static auto op = create_linalg_solve_out_typed_handle();
    return op.call(A, B, left, out);
}

// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {

    static auto op = create_linalg_solve_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, B, left, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorinv, name, "aten::linalg_tensorinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorinv, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorinv, schema_str, "linalg_tensorinv(Tensor self, int ind=2) -> Tensor")

// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_tensorinv::schema> create_linalg_tensorinv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_tensorinv::name, linalg_tensorinv::overload_name)
      .typed<linalg_tensorinv::schema>();
}

// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor
at::Tensor linalg_tensorinv::call(const at::Tensor & self, int64_t ind) {

    static auto op = create_linalg_tensorinv_typed_handle();
    return op.call(self, ind);
}

// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor
at::Tensor linalg_tensorinv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind) {

    static auto op = create_linalg_tensorinv_typed_handle();
    return op.redispatch(dispatchKeySet, self, ind);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorinv_out, name, "aten::linalg_tensorinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorinv_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorinv_out, schema_str, "linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_tensorinv_out::schema> create_linalg_tensorinv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_tensorinv_out::name, linalg_tensorinv_out::overload_name)
      .typed<linalg_tensorinv_out::schema>();
}

// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_tensorinv_out::call(const at::Tensor & self, int64_t ind, at::Tensor & out) {

    static auto op = create_linalg_tensorinv_out_typed_handle();
    return op.call(self, ind, out);
}

// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_tensorinv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind, at::Tensor & out) {

    static auto op = create_linalg_tensorinv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ind, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_tensor, name, "aten::linalg_matrix_rank")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_tensor, overload_name, "atol_rtol_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_tensor, schema_str, "linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor")

// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_atol_rtol_tensor::schema> create_linalg_matrix_rank_atol_rtol_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_rank_atol_rtol_tensor::name, linalg_matrix_rank_atol_rtol_tensor::overload_name)
      .typed<linalg_matrix_rank_atol_rtol_tensor::schema>();
}

// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_atol_rtol_tensor::call(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {

    static auto op = create_linalg_matrix_rank_atol_rtol_tensor_typed_handle();
    return op.call(input, atol, rtol, hermitian);
}

// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_atol_rtol_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {

    static auto op = create_linalg_matrix_rank_atol_rtol_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, input, atol, rtol, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_tensor_out, name, "aten::linalg_matrix_rank")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_tensor_out, overload_name, "atol_rtol_tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_tensor_out, schema_str, "linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_atol_rtol_tensor_out::schema> create_linalg_matrix_rank_atol_rtol_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_rank_atol_rtol_tensor_out::name, linalg_matrix_rank_atol_rtol_tensor_out::overload_name)
      .typed<linalg_matrix_rank_atol_rtol_tensor_out::schema>();
}

// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_atol_rtol_tensor_out::call(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_matrix_rank_atol_rtol_tensor_out_typed_handle();
    return op.call(input, atol, rtol, hermitian, out);
}

// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_atol_rtol_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_matrix_rank_atol_rtol_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, atol, rtol, hermitian, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_float, name, "aten::linalg_matrix_rank")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_float, overload_name, "atol_rtol_float")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_float, schema_str, "linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor")

// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_atol_rtol_float::schema> create_linalg_matrix_rank_atol_rtol_float_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_rank_atol_rtol_float::name, linalg_matrix_rank_atol_rtol_float::overload_name)
      .typed<linalg_matrix_rank_atol_rtol_float::schema>();
}

// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_atol_rtol_float::call(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {

    static auto op = create_linalg_matrix_rank_atol_rtol_float_typed_handle();
    return op.call(self, atol, rtol, hermitian);
}

// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_atol_rtol_float::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {

    static auto op = create_linalg_matrix_rank_atol_rtol_float_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_float_out, name, "aten::linalg_matrix_rank")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_float_out, overload_name, "atol_rtol_float_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_atol_rtol_float_out, schema_str, "linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_atol_rtol_float_out::schema> create_linalg_matrix_rank_atol_rtol_float_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_rank_atol_rtol_float_out::name, linalg_matrix_rank_atol_rtol_float_out::overload_name)
      .typed<linalg_matrix_rank_atol_rtol_float_out::schema>();
}

// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_atol_rtol_float_out::call(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_matrix_rank_atol_rtol_float_out_typed_handle();
    return op.call(self, atol, rtol, hermitian, out);
}

// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_atol_rtol_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_matrix_rank_atol_rtol_float_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank, name, "aten::linalg_matrix_rank")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank, schema_str, "linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor")

// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank::schema> create_linalg_matrix_rank_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_rank::name, linalg_matrix_rank::overload_name)
      .typed<linalg_matrix_rank::schema>();
}

// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank::call(const at::Tensor & self, double tol, bool hermitian) {

    static auto op = create_linalg_matrix_rank_typed_handle();
    return op.call(self, tol, hermitian);
}

// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian) {

    static auto op = create_linalg_matrix_rank_typed_handle();
    return op.redispatch(dispatchKeySet, self, tol, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_out, name, "aten::linalg_matrix_rank")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_out, schema_str, "linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_out::schema> create_linalg_matrix_rank_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_rank_out::name, linalg_matrix_rank_out::overload_name)
      .typed<linalg_matrix_rank_out::schema>();
}

// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_out::call(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_matrix_rank_out_typed_handle();
    return op.call(self, tol, hermitian, out);
}

// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_matrix_rank_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tol, hermitian, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_tol_tensor, name, "aten::linalg_matrix_rank")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_tol_tensor, overload_name, "tol_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_tol_tensor, schema_str, "linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor")

// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_tol_tensor::schema> create_linalg_matrix_rank_tol_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_rank_tol_tensor::name, linalg_matrix_rank_tol_tensor::overload_name)
      .typed<linalg_matrix_rank_tol_tensor::schema>();
}

// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_tol_tensor::call(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {

    static auto op = create_linalg_matrix_rank_tol_tensor_typed_handle();
    return op.call(input, tol, hermitian);
}

// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_tol_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian) {

    static auto op = create_linalg_matrix_rank_tol_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, input, tol, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_out_tol_tensor, name, "aten::linalg_matrix_rank")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_out_tol_tensor, overload_name, "out_tol_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_matrix_rank_out_tol_tensor, schema_str, "linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_out_tol_tensor::schema> create_linalg_matrix_rank_out_tol_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_rank_out_tol_tensor::name, linalg_matrix_rank_out_tol_tensor::overload_name)
      .typed<linalg_matrix_rank_out_tol_tensor::schema>();
}

// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_out_tol_tensor::call(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_matrix_rank_out_tol_tensor_typed_handle();
    return op.call(input, tol, hermitian, out);
}

// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_out_tol_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_matrix_rank_out_tol_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, input, tol, hermitian, out);
}

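// [Editor's illustrative sketch; not part of the generated output.] All of
// the linalg_matrix_rank variants above register under the single operator
// name "aten::linalg_matrix_rank"; only the overload_name string ("", "out",
// "atol_rtol_tensor", "atol_rtol_float", "tol_tensor", and their *_out
// counterparts) tells findSchemaOrThrow which schema to resolve. The
// function name below is hypothetical:
C10_UNUSED static bool example_overload_lookup_sketch() {
  auto & dispatcher = c10::Dispatcher::singleton();
  // Same operator name, different overloads; a lookup matching neither
  // string would throw, hence "OrThrow".
  auto by_float_tol = dispatcher.findSchemaOrThrow("aten::linalg_matrix_rank", "atol_rtol_float");
  auto by_positional_tol = dispatcher.findSchemaOrThrow("aten::linalg_matrix_rank", "");
  return by_float_tol.schema().overload_name() != by_positional_tol.schema().overload_name();
}
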
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_filled_intlist, name, "aten::_test_optional_filled_intlist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_filled_intlist, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_filled_intlist, schema_str, "_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor")

// aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_filled_intlist::schema> create__test_optional_filled_intlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_optional_filled_intlist::name, _test_optional_filled_intlist::overload_name)
      .typed<_test_optional_filled_intlist::schema>();
}

// aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
at::Tensor _test_optional_filled_intlist::call(const at::Tensor & values, at::OptionalIntArrayRef addends) {

    static auto op = create__test_optional_filled_intlist_typed_handle();
    return op.call(values, addends);
}

// aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
at::Tensor _test_optional_filled_intlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends) {

    static auto op = create__test_optional_filled_intlist_typed_handle();
    return op.redispatch(dispatchKeySet, values, addends);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view_copy, name, "aten::_test_autograd_multiple_dispatch_view_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view_copy, schema_str, "_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor")

// aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_view_copy::schema> create__test_autograd_multiple_dispatch_view_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_view_copy::name, _test_autograd_multiple_dispatch_view_copy::overload_name)
      .typed<_test_autograd_multiple_dispatch_view_copy::schema>();
}

// aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_view_copy::call(const at::Tensor & self) {

    static auto op = create__test_autograd_multiple_dispatch_view_copy_typed_handle();
    return op.call(self);
}

// aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_view_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__test_autograd_multiple_dispatch_view_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pad_sequence, name, "aten::pad_sequence")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pad_sequence, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pad_sequence, schema_str, "pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor")

// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pad_sequence::schema> create_pad_sequence_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pad_sequence::name, pad_sequence::overload_name)
      .typed<pad_sequence::schema>();
}

// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
at::Tensor pad_sequence::call(at::TensorList sequences, bool batch_first, double padding_value) {

    static auto op = create_pad_sequence_typed_handle();
    return op.call(sequences, batch_first, padding_value);
}

// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
at::Tensor pad_sequence::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList sequences, bool batch_first, double padding_value) {

    static auto op = create_pad_sequence_typed_handle();
    return op.redispatch(dispatchKeySet, sequences, batch_first, padding_value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal_copy, name, "aten::_fw_primal_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal_copy, schema_str, "_fw_primal_copy(Tensor self, int level) -> Tensor")

// aten::_fw_primal_copy(Tensor self, int level) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_fw_primal_copy::schema> create__fw_primal_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fw_primal_copy::name, _fw_primal_copy::overload_name)
      .typed<_fw_primal_copy::schema>();
}

// aten::_fw_primal_copy(Tensor self, int level) -> Tensor
at::Tensor _fw_primal_copy::call(const at::Tensor & self, int64_t level) {

    static auto op = create__fw_primal_copy_typed_handle();
    return op.call(self, level);
}

// aten::_fw_primal_copy(Tensor self, int level) -> Tensor
at::Tensor _fw_primal_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) {

    static auto op = create__fw_primal_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, level);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real_copy, name, "aten::view_as_real_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real_copy, schema_str, "view_as_real_copy(Tensor self) -> Tensor")

// aten::view_as_real_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<view_as_real_copy::schema> create_view_as_real_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_real_copy::name, view_as_real_copy::overload_name)
      .typed<view_as_real_copy::schema>();
}

// aten::view_as_real_copy(Tensor self) -> Tensor
at::Tensor view_as_real_copy::call(const at::Tensor & self) {

    static auto op = create_view_as_real_copy_typed_handle();
    return op.call(self);
}

// aten::view_as_real_copy(Tensor self) -> Tensor
at::Tensor view_as_real_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_view_as_real_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_copy, name, "aten::as_strided_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_copy, schema_str, "as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor")

// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_copy::schema> create_as_strided_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_copy::name, as_strided_copy::overload_name)
      .typed<as_strided_copy::schema>();
}

// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
at::Tensor as_strided_copy::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {

    static auto op = create_as_strided_copy_typed_handle();
    return op.call(self, size, stride, storage_offset);
}

// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
at::Tensor as_strided_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {

    static auto op = create_as_strided_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, storage_offset);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias_copy, name, "aten::_reshape_alias_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias_copy, schema_str, "_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor")

// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_alias_copy::schema> create__reshape_alias_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_reshape_alias_copy::name, _reshape_alias_copy::overload_name)
      .typed<_reshape_alias_copy::schema>();
}

// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
at::Tensor _reshape_alias_copy::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {

    static auto op = create__reshape_alias_copy_typed_handle();
    return op.call(self, size, stride);
}

// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
at::Tensor _reshape_alias_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {

    static auto op = create__reshape_alias_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_copy_Tensor, name, "aten::split_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_copy_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_copy_Tensor, schema_str, "split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]")

// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<split_copy_Tensor::schema> create_split_copy_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_copy_Tensor::name, split_copy_Tensor::overload_name)
      .typed<split_copy_Tensor::schema>();
}

// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
::std::vector<at::Tensor> split_copy_Tensor::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {

    static auto op = create_split_copy_Tensor_typed_handle();
    return op.call(self, split_size, dim);
}

// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
::std::vector<at::Tensor> split_copy_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {

    static auto op = create_split_copy_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy, name, "aten::squeeze_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy, schema_str, "squeeze_copy(Tensor self) -> Tensor")

// aten::squeeze_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy::schema> create_squeeze_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy::name, squeeze_copy::overload_name)
      .typed<squeeze_copy::schema>();
}

// aten::squeeze_copy(Tensor self) -> Tensor
at::Tensor squeeze_copy::call(const at::Tensor & self) {

    static auto op = create_squeeze_copy_typed_handle();
    return op.call(self);
}

// aten::squeeze_copy(Tensor self) -> Tensor
at::Tensor squeeze_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_squeeze_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dim, name, "aten::squeeze_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dim, schema_str, "squeeze_copy.dim(Tensor self, int dim) -> Tensor")

// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_dim::schema> create_squeeze_copy_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy_dim::name, squeeze_copy_dim::overload_name)
      .typed<squeeze_copy_dim::schema>();
}

// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
at::Tensor squeeze_copy_dim::call(const at::Tensor & self, int64_t dim) {

    static auto op = create_squeeze_copy_dim_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
at::Tensor squeeze_copy_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create_squeeze_copy_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dims, name, "aten::squeeze_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dims, overload_name, "dims")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dims, schema_str, "squeeze_copy.dims(Tensor self, int[] dim) -> Tensor")

// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_dims::schema> create_squeeze_copy_dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy_dims::name, squeeze_copy_dims::overload_name)
      .typed<squeeze_copy_dims::schema>();
}

// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
at::Tensor squeeze_copy_dims::call(const at::Tensor & self, at::IntArrayRef dim) {

    static auto op = create_squeeze_copy_dims_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
at::Tensor squeeze_copy_dims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {

    static auto op = create_squeeze_copy_dims_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(indices_copy, name, "aten::indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(indices_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(indices_copy, schema_str, "indices_copy(Tensor self) -> Tensor")

// aten::indices_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<indices_copy::schema> create_indices_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(indices_copy::name, indices_copy::overload_name)
      .typed<indices_copy::schema>();
}

// aten::indices_copy(Tensor self) -> Tensor
at::Tensor indices_copy::call(const at::Tensor & self) {

    static auto op = create_indices_copy_typed_handle();
    return op.call(self);
}

// aten::indices_copy(Tensor self) -> Tensor
at::Tensor indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_indices_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices_copy, name, "aten::ccol_indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices_copy, schema_str, "ccol_indices_copy(Tensor self) -> Tensor")

// aten::ccol_indices_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ccol_indices_copy::schema> create_ccol_indices_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ccol_indices_copy::name, ccol_indices_copy::overload_name)
      .typed<ccol_indices_copy::schema>();
}

// aten::ccol_indices_copy(Tensor self) -> Tensor
at::Tensor ccol_indices_copy::call(const at::Tensor & self) {

    static auto op = create_ccol_indices_copy_typed_handle();
    return op.call(self);
}

// aten::ccol_indices_copy(Tensor self) -> Tensor
at::Tensor ccol_indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_ccol_indices_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_copy_Tensor_out, name, "aten::split_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_copy_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_copy_Tensor_out, schema_str, "split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()")

// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<split_copy_Tensor_out::schema> create_split_copy_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_copy_Tensor_out::name, split_copy_Tensor_out::overload_name)
      .typed<split_copy_Tensor_out::schema>();
}

// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
void split_copy_Tensor_out::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {

    static auto op = create_split_copy_Tensor_out_typed_handle();
    return op.call(self, split_size, dim, out);
}

// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
void split_copy_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {

    static auto op = create_split_copy_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim, out);
}

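// [Editor's illustrative sketch; not part of the generated output.]
// Out-variants that fill a list of pre-allocated tensors, like
// split_copy.Tensor_out just above, return void rather than Tensor&: the
// caller owns the outputs and passes them as an at::TensorList. The function
// name below is hypothetical, and the sketch assumes dim 0 splits evenly:
C10_UNUSED static void example_split_copy_out_sketch(const at::Tensor & self) {
  // Pre-allocate two output buffers of the right shape, then dispatch.
  at::Tensor halves[] = {at::empty_like(self.narrow(0, 0, self.size(0) / 2)),
                         at::empty_like(self.narrow(0, self.size(0) / 2, self.size(0) / 2))};
  split_copy_Tensor_out::call(self, /*split_size=*/self.size(0) / 2, /*dim=*/0, halves);
}
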
12305STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_efficient_attention, name, "aten::_scaled_dot_product_efficient_attention")
12306STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_efficient_attention, overload_name, "")
12307STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_efficient_attention, schema_str, "_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)")
12308
12309// aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)
12310static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_efficient_attention::schema> create__scaled_dot_product_efficient_attention_typed_handle() {
12311 return c10::Dispatcher::singleton()
12312 .findSchemaOrThrow(_scaled_dot_product_efficient_attention::name, _scaled_dot_product_efficient_attention::overload_name)
12313 .typed<_scaled_dot_product_efficient_attention::schema>();
12314}
12315
12316// aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)
12317::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool compute_log_sumexp, bool is_causal) {
12318
12319 static auto op = create__scaled_dot_product_efficient_attention_typed_handle();
12320 return op.call(query, key, value, compute_log_sumexp, is_causal);
12321}
12322
12323// aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)
12324::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool compute_log_sumexp, bool is_causal) {
12325
12326 static auto op = create__scaled_dot_product_efficient_attention_typed_handle();
12327 return op.redispatch(dispatchKeySet, query, key, value, compute_log_sumexp, is_causal);
12328}
12329
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_chunk_grad_outputs_efficient_attention, name, "aten::_chunk_grad_outputs_efficient_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_chunk_grad_outputs_efficient_attention, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_chunk_grad_outputs_efficient_attention, schema_str, "_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool")

// aten::_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_chunk_grad_outputs_efficient_attention::schema> create__chunk_grad_outputs_efficient_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_chunk_grad_outputs_efficient_attention::name, _chunk_grad_outputs_efficient_attention::overload_name)
      .typed<_chunk_grad_outputs_efficient_attention::schema>();
}

// aten::_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool
bool _chunk_grad_outputs_efficient_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool is_causal) {

    static auto op = create__chunk_grad_outputs_efficient_attention_typed_handle();
    return op.call(query, key, value, is_causal);
}

// aten::_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool
bool _chunk_grad_outputs_efficient_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool is_causal) {

    static auto op = create__chunk_grad_outputs_efficient_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, is_causal);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficient_attention_forward, name, "aten::_efficient_attention_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficient_attention_forward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficient_attention_forward, schema_str, "_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)")

// aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_efficient_attention_forward::schema> create__efficient_attention_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_efficient_attention_forward::name, _efficient_attention_forward::overload_name)
      .typed<_efficient_attention_forward::schema>();
}

// aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _efficient_attention_forward::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, bool compute_log_sumexp, bool causal) {

    static auto op = create__efficient_attention_forward_typed_handle();
    return op.call(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal);
}

// aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _efficient_attention_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, bool compute_log_sumexp, bool causal) {

    static auto op = create__efficient_attention_forward_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal);
}

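// Illustrative sketch (not produced by torchgen): calling the forward kernel
// through the generated at:: wrapper for the dense (non-varlen) case, where
// the cu_seqlens/max_seqlen arguments are simply c10::nullopt. Suitably
// shaped CUDA tensors query/key/value are assumed.
//
//   auto [out, logsumexp] = at::_efficient_attention_forward(
//       query, key, value,
//       /*cu_seqlens_q=*/c10::nullopt, /*cu_seqlens_k=*/c10::nullopt,
//       /*max_seqlen_q=*/c10::nullopt,
//       /*compute_log_sumexp=*/false, /*causal=*/false);
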
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_decoder_only_layer_fwd, name, "aten::_transformer_decoder_only_layer_fwd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_decoder_only_layer_fwd, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_decoder_only_layer_fwd, schema_str, "_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)")

// aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_transformer_decoder_only_layer_fwd::schema> create__transformer_decoder_only_layer_fwd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_transformer_decoder_only_layer_fwd::name, _transformer_decoder_only_layer_fwd::overload_name)
      .typed<_transformer_decoder_only_layer_fwd::schema>();
}

// aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transformer_decoder_only_layer_fwd::call(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value) {

    static auto op = create__transformer_decoder_only_layer_fwd_typed_handle();
    return op.call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
}

// aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transformer_decoder_only_layer_fwd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value) {

    static auto op = create__transformer_decoder_only_layer_fwd_typed_handle();
    return op.redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j1, name, "aten::special_bessel_j1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j1, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j1, schema_str, "special_bessel_j1(Tensor self) -> Tensor")

// aten::special_bessel_j1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_j1::schema> create_special_bessel_j1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_bessel_j1::name, special_bessel_j1::overload_name)
      .typed<special_bessel_j1::schema>();
}

// aten::special_bessel_j1(Tensor self) -> Tensor
at::Tensor special_bessel_j1::call(const at::Tensor & self) {

    static auto op = create_special_bessel_j1_typed_handle();
    return op.call(self);
}

// aten::special_bessel_j1(Tensor self) -> Tensor
at::Tensor special_bessel_j1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_bessel_j1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

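// Illustrative sketch (not produced by torchgen): the functional overload via
// the public at:: wrapper, applied elementwise.
//
//   at::Tensor x  = at::linspace(0.0, 10.0, /*steps=*/101);
//   at::Tensor j1 = at::special_bessel_j1(x);  // Bessel function of the first kind, order 1
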
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j1_out, name, "aten::special_bessel_j1")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j1_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_bessel_j1_out, schema_str, "special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_j1_out::schema> create_special_bessel_j1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_bessel_j1_out::name, special_bessel_j1_out::overload_name)
      .typed<special_bessel_j1_out::schema>();
}

// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_j1_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_bessel_j1_out_typed_handle();
    return op.call(self, out);
}

// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_j1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_bessel_j1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

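// Illustrative sketch (not produced by torchgen): the .out overload writes
// into a caller-provided tensor. torchgen also emits an *_outf spelling that
// takes `out` last, matching the schema order used by ::call above.
//
//   at::Tensor out = at::empty_like(x);
//   at::special_bessel_j1_out(out, x);   // out-first convenience wrapper
//   at::special_bessel_j1_outf(x, out);  // schema-order spelling
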
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v, name, "aten::special_chebyshev_polynomial_v")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v, schema_str, "special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor")

// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v::schema> create_special_chebyshev_polynomial_v_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_v::name, special_chebyshev_polynomial_v::overload_name)
      .typed<special_chebyshev_polynomial_v::schema>();
}

// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_v::call(const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_v_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_v::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_v_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_x_scalar, name, "aten::special_chebyshev_polynomial_v")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_x_scalar, overload_name, "x_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_x_scalar, schema_str, "special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor")

// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_x_scalar::schema> create_special_chebyshev_polynomial_v_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_v_x_scalar::name, special_chebyshev_polynomial_v_x_scalar::overload_name)
      .typed<special_chebyshev_polynomial_v_x_scalar::schema>();
}

// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_v_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_v_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_v_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_v_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_n_scalar, name, "aten::special_chebyshev_polynomial_v")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_n_scalar, overload_name, "n_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_n_scalar, schema_str, "special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor")

// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_n_scalar::schema> create_special_chebyshev_polynomial_v_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_v_n_scalar::name, special_chebyshev_polynomial_v_n_scalar::overload_name)
      .typed<special_chebyshev_polynomial_v_n_scalar::schema>();
}

// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_v_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_chebyshev_polynomial_v_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_v_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_chebyshev_polynomial_v_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

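// Illustrative sketch (not produced by torchgen): the three functional
// overloads above differ only in which argument is a Scalar, so ordinary C++
// overload resolution picks between them at the at:: wrapper level.
//
//   at::Tensor v0 = at::special_chebyshev_polynomial_v(x, n);    // Tensor x, Tensor n
//   at::Tensor v1 = at::special_chebyshev_polynomial_v(0.5, n);  // Scalar x broadcast against n
//   at::Tensor v2 = at::special_chebyshev_polynomial_v(x, 3);    // Scalar n broadcast against x
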
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_out, name, "aten::special_chebyshev_polynomial_v")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_out, schema_str, "special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_out::schema> create_special_chebyshev_polynomial_v_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_v_out::name, special_chebyshev_polynomial_v_out::overload_name)
      .typed<special_chebyshev_polynomial_v_out::schema>();
}

// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_v_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_v_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_x_scalar_out, name, "aten::special_chebyshev_polynomial_v")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_x_scalar_out, overload_name, "x_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_x_scalar_out, schema_str, "special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_x_scalar_out::schema> create_special_chebyshev_polynomial_v_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_v_x_scalar_out::name, special_chebyshev_polynomial_v_x_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_v_x_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_v_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_v_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_n_scalar_out, name, "aten::special_chebyshev_polynomial_v")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_n_scalar_out, overload_name, "n_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_v_n_scalar_out, schema_str, "special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_n_scalar_out::schema> create_special_chebyshev_polynomial_v_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_v_n_scalar_out::name, special_chebyshev_polynomial_v_n_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_v_n_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_v_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_v_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_backward_out, name, "aten::_cudnn_rnn_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_backward_out, schema_str, "_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()")

// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_backward_out::schema> create__cudnn_rnn_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn_backward_out::name, _cudnn_rnn_backward_out::overload_name)
      .typed<_cudnn_rnn_backward_out::schema>();
}

// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
void _cudnn_rnn_backward_out::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {

    static auto op = create__cudnn_rnn_backward_out_typed_handle();
    return op.call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}

// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
void _cudnn_rnn_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {

    static auto op = create__cudnn_rnn_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_dropout_backward_out, name, "aten::native_dropout_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_dropout_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_dropout_backward_out, schema_str, "native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)")

// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<native_dropout_backward_out::schema> create_native_dropout_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_dropout_backward_out::name, native_dropout_backward_out::overload_name)
      .typed<native_dropout_backward_out::schema>();
}

// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_dropout_backward_out::call(const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) {

    static auto op = create_native_dropout_backward_out_typed_handle();
    return op.call(grad_output, mask, scale, out);
}

// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_dropout_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) {

    static auto op = create_native_dropout_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, mask, scale, out);
}

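// Illustrative sketch (not produced by torchgen): native_dropout_backward
// computes grad_output * mask * scale, and the .out overload writes the
// result into a preallocated tensor. `p` (the dropout probability used in
// the forward pass) is an assumption of the example.
//
//   at::Tensor grad_input = at::empty_like(grad_output);
//   at::native_dropout_backward_out(grad_input, grad_output, mask,
//       /*scale=*/1.0 / (1.0 - p));
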
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Scalar_out, name, "aten::_add_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_add_relu_Scalar_out, schema_str, "_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu_Scalar_out::schema> create__add_relu_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu_Scalar_out::name, _add_relu_Scalar_out::overload_name)
      .typed<_add_relu_Scalar_out::schema>();
}

// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _add_relu_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create__add_relu_Scalar_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _add_relu_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create__add_relu_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

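// Illustrative sketch (not produced by torchgen): _add_relu fuses an add with
// a ReLU, i.e. relu(self + alpha * other); this Scalar_out overload takes a
// Scalar `other` and writes into `out`. A suitably shaped `self` is assumed.
//
//   at::Tensor out = at::empty_like(self);
//   at::_add_relu_out(out, self, /*other=*/1.0, /*alpha=*/1);
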
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator_out, name, "aten::affine_grid_generator")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator_out, schema_str, "affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")

// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<affine_grid_generator_out::schema> create_affine_grid_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(affine_grid_generator_out::name, affine_grid_generator_out::overload_name)
      .typed<affine_grid_generator_out::schema>();
}

// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & affine_grid_generator_out::call(const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) {

    static auto op = create_affine_grid_generator_out_typed_handle();
    return op.call(theta, size, align_corners, out);
}

// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & affine_grid_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) {

    static auto op = create_affine_grid_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, theta, size, align_corners, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_out, name, "aten::bartlett_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_out, schema_str, "bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bartlett_window_out::schema> create_bartlett_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bartlett_window_out::name, bartlett_window_out::overload_name)
      .typed<bartlett_window_out::schema>();
}

// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bartlett_window_out::call(int64_t window_length, at::Tensor & out) {

    static auto op = create_bartlett_window_out_typed_handle();
    return op.call(window_length, out);
}

// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bartlett_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {

    static auto op = create_bartlett_window_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_periodic_out, name, "aten::bartlett_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_periodic_out, overload_name, "periodic_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bartlett_window_periodic_out, schema_str, "bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bartlett_window_periodic_out::schema> create_bartlett_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bartlett_window_periodic_out::name, bartlett_window_periodic_out::overload_name)
      .typed<bartlett_window_periodic_out::schema>();
}

// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bartlett_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {

    static auto op = create_bartlett_window_periodic_out_typed_handle();
    return op.call(window_length, periodic, out);
}

// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bartlett_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {

    static auto op = create_bartlett_window_periodic_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, out);
}

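// Illustrative sketch (not produced by torchgen): the window-factory .out
// overloads fill a caller-provided tensor of shape [window_length], resizing
// it if needed, so a 0-sized tensor is a convenient starting point.
//
//   at::Tensor w = at::empty({0});
//   at::bartlett_window_out(w, /*window_length=*/128);
//   at::bartlett_window_out(w, /*window_length=*/128, /*periodic=*/true);
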
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_out, name, "aten::copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(copy_out, schema_str, "copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copy_out::schema> create_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_out::name, copy_out::overload_name)
      .typed<copy_out::schema>();
}

// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copy_out::call(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {

    static auto op = create_copy_out_typed_handle();
    return op.call(self, src, non_blocking, out);
}

// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {

    static auto op = create_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking, out);
}

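// Illustrative sketch (not produced by torchgen): aten::copy is the
// functional counterpart of Tensor::copy_ (used by functionalization); the
// .out overload materializes "self with src copied into it" in `out`.
// Suitably shaped `self` and `src` are assumed.
//
//   at::Tensor out = at::empty_like(self);
//   at::copy_out(out, self, src, /*non_blocking=*/false);
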
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_and_resize_out, name, "aten::_copy_from_and_resize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_and_resize_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_copy_from_and_resize_out, schema_str, "_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_copy_from_and_resize_out::schema> create__copy_from_and_resize_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_copy_from_and_resize_out::name, _copy_from_and_resize_out::overload_name)
      .typed<_copy_from_and_resize_out::schema>();
}

// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _copy_from_and_resize_out::call(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {

    static auto op = create__copy_from_and_resize_out_typed_handle();
    return op.call(self, dst, out);
}

// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _copy_from_and_resize_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {

    static auto op = create__copy_from_and_resize_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dst, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_out, name, "aten::cudnn_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_out, schema_str, "cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_out::schema> create_cudnn_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_out::name, cudnn_convolution_out::overload_name)
      .typed<cudnn_convolution_out::schema>();
}

// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {

    static auto op = create_cudnn_convolution_out_typed_handle();
    return op.call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}

// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {

    static auto op = create_cudnn_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_relu_out, name, "aten::cudnn_convolution_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_relu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_convolution_relu_out, schema_str, "cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_relu_out::schema> create_cudnn_convolution_relu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_relu_out::name, cudnn_convolution_relu_out::overload_name)
      .typed<cudnn_convolution_relu_out::schema>();
}

// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_relu_out::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {

    static auto op = create_cudnn_convolution_relu_out_typed_handle();
    return op.call(self, weight, bias, stride, padding, dilation, groups, out);
}

// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {

    static auto op = create_cudnn_convolution_relu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_embed_out, name, "aten::diag_embed")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_embed_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(diag_embed_out, schema_str, "diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diag_embed_out::schema> create_diag_embed_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diag_embed_out::name, diag_embed_out::overload_name)
      .typed<diag_embed_out::schema>();
}

// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diag_embed_out::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {

    static auto op = create_diag_embed_out_typed_handle();
    return op.call(self, offset, dim1, dim2, out);
}

// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diag_embed_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {

    static auto op = create_diag_embed_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, offset, dim1, dim2, out);
}

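// Illustrative sketch (not produced by torchgen): with the default
// offset/dim1/dim2, diag_embed of a 1-D tensor of length n yields an n x n
// matrix carrying the input on its diagonal; the .out overload resizes a
// 0-sized `out` accordingly.
//
//   at::Tensor d   = at::randn({3});
//   at::Tensor out = at::empty({0});
//   at::diag_embed_out(out, d);  // out becomes [3, 3]
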
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_empty_affine_quantized_out, name, "aten::_empty_affine_quantized")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_empty_affine_quantized_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_empty_affine_quantized_out, schema_str, "_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)")

// aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_empty_affine_quantized_out::schema> create__empty_affine_quantized_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_empty_affine_quantized_out::name, _empty_affine_quantized_out::overload_name)
      .typed<_empty_affine_quantized_out::schema>();
}

// aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _empty_affine_quantized_out::call(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create__empty_affine_quantized_out_typed_handle();
    return op.call(size, scale, zero_point, memory_format, out);
}

// aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _empty_affine_quantized_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create__empty_affine_quantized_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, scale, zero_point, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output_out, name, "aten::_resize_output")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output_out, schema_str, "_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_resize_output_out::schema> create__resize_output_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_resize_output_out::name, _resize_output_out::overload_name)
      .typed<_resize_output_out::schema>();
}

// aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & _resize_output_out::call(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) {

    static auto op = create__resize_output_out_typed_handle();
    return op.call(self, size, device, out);
}

// aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & _resize_output_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) {

    static auto op = create__resize_output_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, device, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output, name, "aten::_resize_output")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_resize_output, schema_str, "_resize_output(Tensor self, int[] size, Device device) -> Tensor")

// aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_resize_output::schema> create__resize_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_resize_output::name, _resize_output::overload_name)
      .typed<_resize_output::schema>();
}

// aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor
at::Tensor _resize_output::call(const at::Tensor & self, at::IntArrayRef size, at::Device device) {

    static auto op = create__resize_output_typed_handle();
    return op.call(self, size, device);
}

// aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor
at::Tensor _resize_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device) {

    static auto op = create__resize_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, device);
}

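// Illustrative sketch (not produced by torchgen): _resize_output is an
// internal helper behind out= argument handling, and this functional variant
// exists for functionalization; it returns a resized result rather than
// mutating `self`. Treat this as an assumption-laden example, not public API.
//
//   at::Tensor t = at::empty({0});
//   at::Tensor r = at::_resize_output(t, /*size=*/{2, 3}, at::kCPU);
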
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(empty_like_out, name, "aten::empty_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(empty_like_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(empty_like_out, schema_str, "empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<empty_like_out::schema> create_empty_like_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_like_out::name, empty_like_out::overload_name)
      .typed<empty_like_out::schema>();
}

// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_like_out::call(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_empty_like_out_typed_handle();
    return op.call(self, memory_format, out);
}

// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_empty_like_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format, out);
}

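// Illustrative sketch (not produced by torchgen): the .out overload of
// empty_like shapes `out` like `self` and leaves the values uninitialized;
// memory_format defaults to c10::nullopt in the generated wrapper. A tensor
// `self` is assumed.
//
//   at::Tensor out = at::empty({0});
//   at::empty_like_out(out, self);
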
12980STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_backward_out, name, "aten::grid_sampler_3d_backward")
12981STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_backward_out, overload_name, "out")
12982STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_backward_out, schema_str, "grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
12983
12984// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
12985static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_3d_backward_out::schema> create_grid_sampler_3d_backward_out_typed_handle() {
12986 return c10::Dispatcher::singleton()
12987 .findSchemaOrThrow(grid_sampler_3d_backward_out::name, grid_sampler_3d_backward_out::overload_name)
12988 .typed<grid_sampler_3d_backward_out::schema>();
12989}
12990
12991// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
12992::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
12993
12994 static auto op = create_grid_sampler_3d_backward_out_typed_handle();
12995 return op.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
12996}
12997
12998// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
12999::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
13000
13001 static auto op = create_grid_sampler_3d_backward_out_typed_handle();
13002 return op.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
13003}
13004
13005STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_out, name, "aten::native_group_norm")
13006STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_out, overload_name, "out")
13007STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_out, schema_str, "native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
13008
13009// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
13010static C10_NOINLINE c10::TypedOperatorHandle<native_group_norm_out::schema> create_native_group_norm_out_typed_handle() {
13011 return c10::Dispatcher::singleton()
13012 .findSchemaOrThrow(native_group_norm_out::name, native_group_norm_out::overload_name)
13013 .typed<native_group_norm_out::schema>();
13014}
13015
13016// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
13017::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
13018
13019 static auto op = create_native_group_norm_out_typed_handle();
13020 return op.call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
13021}
13022
13023// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
13024::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
13025
13026 static auto op = create_native_group_norm_out_typed_handle();
13027 return op.redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
13028}
13029
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_backward_out, name, "aten::linear_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linear_backward_out, schema_str, "linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<linear_backward_out::schema> create_linear_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linear_backward_out::name, linear_backward_out::overload_name)
      .typed<linear_backward_out::schema>();
}

// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_linear_backward_out_typed_handle();
    return op.call(self, grad_output, weight, output_mask, out0, out1, out2);
}

// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_linear_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_input_out, name, "aten::mkldnn_linear_backward_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_input_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_input_out, schema_str, "mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_input_out::schema> create_mkldnn_linear_backward_input_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward_input_out::name, mkldnn_linear_backward_input_out::overload_name)
      .typed<mkldnn_linear_backward_input_out::schema>();
}

// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_linear_backward_input_out::call(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {

    static auto op = create_mkldnn_linear_backward_input_out_typed_handle();
    return op.call(input_size, grad_output, weight, out);
}

// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_linear_backward_input_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {

    static auto op = create_mkldnn_linear_backward_input_out_typed_handle();
    return op.redispatch(dispatchKeySet, input_size, grad_output, weight, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_out, name, "aten::mkldnn_linear_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_linear_backward_out, schema_str, "mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_out::schema> create_mkldnn_linear_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward_out::name, mkldnn_linear_backward_out::overload_name)
      .typed<mkldnn_linear_backward_out::schema>();
}

// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_mkldnn_linear_backward_out_typed_handle();
    return op.call(self, grad_output, weight, output_mask, out0, out1, out2);
}

// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_mkldnn_linear_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_with_counts_out, name, "aten::batch_norm_gather_stats_with_counts")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_with_counts_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_with_counts_out, schema_str, "batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_gather_stats_with_counts_out::schema> create_batch_norm_gather_stats_with_counts_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_gather_stats_with_counts_out::name, batch_norm_gather_stats_with_counts_out::overload_name)
      .typed<batch_norm_gather_stats_with_counts_out::schema>();
}

// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out::call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_batch_norm_gather_stats_with_counts_out_typed_handle();
    return op.call(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
}

// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_batch_norm_gather_stats_with_counts_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pdist_backward_out, name, "aten::_pdist_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pdist_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pdist_backward_out, schema_str, "_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_pdist_backward_out::schema> create__pdist_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pdist_backward_out::name, _pdist_backward_out::overload_name)
      .typed<_pdist_backward_out::schema>();
}

// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pdist_backward_out::call(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {

    static auto op = create__pdist_backward_out_typed_handle();
    return op.call(grad, self, p, pdist, out);
}

// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pdist_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {

    static auto op = create__pdist_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, p, pdist, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_shuffle_out, name, "aten::pixel_shuffle")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_shuffle_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pixel_shuffle_out, schema_str, "pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)")

// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pixel_shuffle_out::schema> create_pixel_shuffle_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pixel_shuffle_out::name, pixel_shuffle_out::overload_name)
      .typed<pixel_shuffle_out::schema>();
}

// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pixel_shuffle_out::call(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {

    static auto op = create_pixel_shuffle_out_typed_handle();
    return op.call(self, upscale_factor, out);
}

// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pixel_shuffle_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {

    static auto op = create_pixel_shuffle_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, upscale_factor, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu_out, name, "aten::celu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(celu_out, schema_str, "celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<celu_out::schema> create_celu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(celu_out::name, celu_out::overload_name)
      .typed<celu_out::schema>();
}

// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & celu_out::call(const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_celu_out_typed_handle();
    return op.call(self, alpha, out);
}

// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & celu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_celu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha, out);
}

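// Illustrative sketch only (not part of the generated file): a caller
// reaches celu_out::call() through the ATen front end. This assumes the
// usual generated at::celu_outf wrapper from <ATen/ops/celu.h>:
//
//   at::Tensor self = at::randn({4});
//   at::Tensor out = at::empty_like(self);
//   // resolves "aten::celu.out" via the Dispatcher and writes into `out`
//   at::celu_outf(self, /*alpha=*/1.0, out);
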
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_backward_out, name, "aten::slice_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_backward_out, schema_str, "slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)")

// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slice_backward_out::schema> create_slice_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_backward_out::name, slice_backward_out::overload_name)
      .typed<slice_backward_out::schema>();
}

// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_backward_out::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {

    static auto op = create_slice_backward_out_typed_handle();
    return op.call(grad_output, input_sizes, dim, start, end, step, out);
}

// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {

    static auto op = create_slice_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_Tensor_out, name, "aten::unsafe_split")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unsafe_split_Tensor_out, schema_str, "unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()")

// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_split_Tensor_out::schema> create_unsafe_split_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_split_Tensor_out::name, unsafe_split_Tensor_out::overload_name)
      .typed<unsafe_split_Tensor_out::schema>();
}

// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
void unsafe_split_Tensor_out::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {

    static auto op = create_unsafe_split_Tensor_out_typed_handle();
    return op.call(self, split_size, dim, out);
}

// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
void unsafe_split_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {

    static auto op = create_unsafe_split_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim, out);
}

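// Note: ops like unsafe_split.Tensor_out write into a Tensor(a!)[] list,
// so their generated entry points take at::TensorList out and return
// void rather than returning references to the individual outputs.
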
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction_out, name, "aten::std_mean")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction_out, overload_name, "correction_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(std_mean_correction_out, schema_str, "std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_correction_out::schema> create_std_mean_correction_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_correction_out::name, std_mean_correction_out::overload_name)
      .typed<std_mean_correction_out::schema>();
}

// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> std_mean_correction_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_std_mean_correction_out_typed_handle();
    return op.call(self, dim, correction, keepdim, out0, out1);
}

// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> std_mean_correction_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_std_mean_correction_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flip_out, name, "aten::flip")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flip_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flip_out, schema_str, "flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)")

// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<flip_out::schema> create_flip_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flip_out::name, flip_out::overload_name)
      .typed<flip_out::schema>();
}

// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & flip_out::call(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {

    static auto op = create_flip_out_typed_handle();
    return op.call(self, dims, out);
}

// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & flip_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {

    static auto op = create_flip_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(roll_out, name, "aten::roll")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(roll_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(roll_out, schema_str, "roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)")

// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<roll_out::schema> create_roll_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(roll_out::name, roll_out::overload_name)
      .typed<roll_out::schema>();
}

// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & roll_out::call(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {

    static auto op = create_roll_out_typed_handle();
    return op.call(self, shifts, dims, out);
}

// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & roll_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {

    static auto op = create_roll_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, shifts, dims, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_out, name, "aten::_nested_from_padded")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_out, schema_str, "_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded_out::schema> create__nested_from_padded_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_from_padded_out::name, _nested_from_padded_out::overload_name)
      .typed<_nested_from_padded_out::schema>();
}

// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_from_padded_out::call(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {

    static auto op = create__nested_from_padded_out_typed_handle();
    return op.call(padded, cpu_nested_shape_example, fuse_transform_0213, out);
}

// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_from_padded_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {

    static auto op = create__nested_from_padded_out_typed_handle();
    return op.redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_trilinear_out, name, "aten::_trilinear")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_trilinear_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_trilinear_out, schema_str, "_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_trilinear_out::schema> create__trilinear_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_trilinear_out::name, _trilinear_out::overload_name)
      .typed<_trilinear_out::schema>();
}

// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _trilinear_out::call(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) {

    static auto op = create__trilinear_out_typed_handle();
    return op.call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
}

// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _trilinear_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) {

    static auto op = create__trilinear_out_typed_handle();
    return op.redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique2_out, name, "aten::_unique2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_unique2_out, schema_str, "_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_unique2_out::schema> create__unique2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unique2_out::name, _unique2_out::overload_name)
      .typed<_unique2_out::schema>();
}

// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out::call(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__unique2_out_typed_handle();
    return op.call(self, sorted, return_inverse, return_counts, out0, out1, out2);
}

// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__unique2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_backward_out, name, "aten::_weight_norm_interface_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_weight_norm_interface_backward_out, schema_str, "_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_interface_backward_out::schema> create__weight_norm_interface_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_weight_norm_interface_backward_out::name, _weight_norm_interface_backward_out::overload_name)
      .typed<_weight_norm_interface_backward_out::schema>();
}

// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out::call(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__weight_norm_interface_backward_out_typed_handle();
    return op.call(grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
}

// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__weight_norm_interface_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_like_out, name, "aten::zeros_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_like_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zeros_like_out, schema_str, "zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<zeros_like_out::schema> create_zeros_like_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zeros_like_out::name, zeros_like_out::overload_name)
      .typed<zeros_like_out::schema>();
}

// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_like_out::call(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_zeros_like_out_typed_handle();
    return op.call(self, memory_format, out);
}

// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_zeros_like_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_prod_dim_dtype_out, name, "aten::_sparse_csr_prod")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_prod_dim_dtype_out, overload_name, "dim_dtype_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_csr_prod_dim_dtype_out, schema_str, "_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")

// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_prod_dim_dtype_out::schema> create__sparse_csr_prod_dim_dtype_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_csr_prod_dim_dtype_out::name, _sparse_csr_prod_dim_dtype_out::overload_name)
      .typed<_sparse_csr_prod_dim_dtype_out::schema>();
}

// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_csr_prod_dim_dtype_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create__sparse_csr_prod_dim_dtype_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_csr_prod_dim_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {

    static auto op = create__sparse_csr_prod_dim_dtype_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_softmax_backward_data_out, name, "aten::_sparse_softmax_backward_data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_softmax_backward_data_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_softmax_backward_data_out, schema_str, "_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_backward_data_out::schema> create__sparse_softmax_backward_data_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_softmax_backward_data_out::name, _sparse_softmax_backward_data_out::overload_name)
      .typed<_sparse_softmax_backward_data_out::schema>();
}

// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_softmax_backward_data_out::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__sparse_softmax_backward_data_out_typed_handle();
    return op.call(grad_output, output, dim, self, out);
}

// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_softmax_backward_data_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__sparse_softmax_backward_data_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_out, name, "aten::_sparse_log_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_out, schema_str, "_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_out::schema> create__sparse_log_softmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_log_softmax_out::name, _sparse_log_softmax_out::overload_name)
      .typed<_sparse_log_softmax_out::schema>();
}

// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_log_softmax_out::call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {

    static auto op = create__sparse_log_softmax_out_typed_handle();
    return op.call(self, dim, half_to_float, out);
}

// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_log_softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {

    static auto op = create__sparse_log_softmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, half_to_float, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_backward_data_out, name, "aten::_sparse_log_softmax_backward_data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_backward_data_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_log_softmax_backward_data_out, schema_str, "_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_backward_data_out::schema> create__sparse_log_softmax_backward_data_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_log_softmax_backward_data_out::name, _sparse_log_softmax_backward_data_out::overload_name)
      .typed<_sparse_log_softmax_backward_data_out::schema>();
}

// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_log_softmax_backward_data_out::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__sparse_log_softmax_backward_data_out_typed_handle();
    return op.call(grad_output, output, dim, self, out);
}

// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_log_softmax_backward_data_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__sparse_log_softmax_backward_data_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_spdiags_out, name, "aten::_spdiags")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_spdiags_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_spdiags_out, schema_str, "_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_spdiags_out::schema> create__spdiags_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_spdiags_out::name, _spdiags_out::overload_name)
      .typed<_spdiags_out::schema>();
}

// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _spdiags_out::call(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) {

    static auto op = create__spdiags_out_typed_handle();
    return op.call(diagonals, offsets, shape, layout, out);
}

// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _spdiags_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) {

    static auto op = create__spdiags_out_typed_handle();
    return op.redispatch(dispatchKeySet, diagonals, offsets, shape, layout, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero_out, name, "aten::zero")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero_out, schema_str, "zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<zero_out::schema> create_zero_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zero_out::name, zero_out::overload_name)
      .typed<zero_out::schema>();
}

// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zero_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_zero_out_typed_handle();
    return op.call(self, out);
}

// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zero_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_zero_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero, name, "aten::zero")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(zero, schema_str, "zero(Tensor self) -> Tensor")

// aten::zero(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<zero::schema> create_zero_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zero::name, zero::overload_name)
      .typed<zero::schema>();
}

// aten::zero(Tensor self) -> Tensor
at::Tensor zero::call(const at::Tensor & self) {

    static auto op = create_zero_typed_handle();
    return op.call(self);
}

// aten::zero(Tensor self) -> Tensor
at::Tensor zero::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_zero_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

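// zero.out and zero above are the out= and functional counterparts of
// the in-place aten::zero_ (most likely generated for the
// functionalization pass); note the functional variant returns a fresh
// at::Tensor by value instead of a reference to a mutated argument.
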
13630STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Tensor_out, name, "aten::rsub")
13631STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Tensor_out, overload_name, "Tensor_out")
13632STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Tensor_out, schema_str, "rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")
13633
13634// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
13635static C10_NOINLINE c10::TypedOperatorHandle<rsub_Tensor_out::schema> create_rsub_Tensor_out_typed_handle() {
13636 return c10::Dispatcher::singleton()
13637 .findSchemaOrThrow(rsub_Tensor_out::name, rsub_Tensor_out::overload_name)
13638 .typed<rsub_Tensor_out::schema>();
13639}
13640
13641// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
13642at::Tensor & rsub_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
13643
13644 static auto op = create_rsub_Tensor_out_typed_handle();
13645 return op.call(self, other, alpha, out);
13646}
13647
13648// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
13649at::Tensor & rsub_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
13650
13651 static auto op = create_rsub_Tensor_out_typed_handle();
13652 return op.redispatch(dispatchKeySet, self, other, alpha, out);
13653}
13654
13655STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Scalar_out, name, "aten::rsub")
13656STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Scalar_out, overload_name, "Scalar_out")
13657STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rsub_Scalar_out, schema_str, "rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)")
13658
13659// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
13660static C10_NOINLINE c10::TypedOperatorHandle<rsub_Scalar_out::schema> create_rsub_Scalar_out_typed_handle() {
13661 return c10::Dispatcher::singleton()
13662 .findSchemaOrThrow(rsub_Scalar_out::name, rsub_Scalar_out::overload_name)
13663 .typed<rsub_Scalar_out::schema>();
13664}
13665
13666// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
13667at::Tensor & rsub_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
13668
13669 static auto op = create_rsub_Scalar_out_typed_handle();
13670 return op.call(self, other, alpha, out);
13671}
13672
13673// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
13674at::Tensor & rsub_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
13675
13676 static auto op = create_rsub_Scalar_out_typed_handle();
13677 return op.redispatch(dispatchKeySet, self, other, alpha, out);
13678}
13679
13680STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_with_dims_out, name, "aten::_sparse_coo_tensor_with_dims")
13681STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_with_dims_out, overload_name, "out")
13682STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_coo_tensor_with_dims_out, schema_str, "_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)")
13683
13684// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)
13685static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_with_dims_out::schema> create__sparse_coo_tensor_with_dims_out_typed_handle() {
13686 return c10::Dispatcher::singleton()
13687 .findSchemaOrThrow(_sparse_coo_tensor_with_dims_out::name, _sparse_coo_tensor_with_dims_out::overload_name)
13688 .typed<_sparse_coo_tensor_with_dims_out::schema>();
13689}
13690
13691// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)
13692at::Tensor & _sparse_coo_tensor_with_dims_out::call(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
13693
13694 static auto op = create__sparse_coo_tensor_with_dims_out_typed_handle();
13695 return op.call(sparse_dim, dense_dim, size, out);
13696}
13697
13698// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)
13699at::Tensor & _sparse_coo_tensor_with_dims_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
13700
13701 static auto op = create__sparse_coo_tensor_with_dims_out_typed_handle();
13702 return op.redispatch(dispatchKeySet, sparse_dim, dense_dim, size, out);
13703}
13704
13705STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_coalesce_out, name, "aten::_coalesce")
13706STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_coalesce_out, overload_name, "out")
13707STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_coalesce_out, schema_str, "_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
13708
13709// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13710static C10_NOINLINE c10::TypedOperatorHandle<_coalesce_out::schema> create__coalesce_out_typed_handle() {
13711 return c10::Dispatcher::singleton()
13712 .findSchemaOrThrow(_coalesce_out::name, _coalesce_out::overload_name)
13713 .typed<_coalesce_out::schema>();
13714}
13715
13716// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13717at::Tensor & _coalesce_out::call(const at::Tensor & self, at::Tensor & out) {
13718
13719 static auto op = create__coalesce_out_typed_handle();
13720 return op.call(self, out);
13721}
13722
13723// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13724at::Tensor & _coalesce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
13725
13726 static auto op = create__coalesce_out_typed_handle();
13727 return op.redispatch(dispatchKeySet, self, out);
13728}
13729
13730STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_scales_out, name, "aten::q_per_channel_scales")
13731STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_scales_out, overload_name, "out")
13732STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_scales_out, schema_str, "q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
13733
13734// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13735static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_scales_out::schema> create_q_per_channel_scales_out_typed_handle() {
13736 return c10::Dispatcher::singleton()
13737 .findSchemaOrThrow(q_per_channel_scales_out::name, q_per_channel_scales_out::overload_name)
13738 .typed<q_per_channel_scales_out::schema>();
13739}
13740
13741// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13742at::Tensor & q_per_channel_scales_out::call(const at::Tensor & self, at::Tensor & out) {
13743
13744 static auto op = create_q_per_channel_scales_out_typed_handle();
13745 return op.call(self, out);
13746}
13747
13748// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13749at::Tensor & q_per_channel_scales_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
13750
13751 static auto op = create_q_per_channel_scales_out_typed_handle();
13752 return op.redispatch(dispatchKeySet, self, out);
13753}
13754
13755STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_mps_backward_out, name, "aten::lstm_mps_backward")
13756STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_mps_backward_out, overload_name, "out")
13757STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_mps_backward_out, schema_str, "lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()")
13758
13759// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
13760static C10_NOINLINE c10::TypedOperatorHandle<lstm_mps_backward_out::schema> create_lstm_mps_backward_out_typed_handle() {
13761 return c10::Dispatcher::singleton()
13762 .findSchemaOrThrow(lstm_mps_backward_out::name, lstm_mps_backward_out::overload_name)
13763 .typed<lstm_mps_backward_out::schema>();
13764}
13765
13766// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
13767void lstm_mps_backward_out::call(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
13768
13769 static auto op = create_lstm_mps_backward_out_typed_handle();
13770 return op.call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
13771}
13772
13773// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
13774void lstm_mps_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
13775
13776 static auto op = create_lstm_mps_backward_out_typed_handle();
13777 return op.redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
13778}
13779
13780STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward_impl_out, name, "aten::_thnn_fused_lstm_cell_backward_impl")
13781STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward_impl_out, overload_name, "out")
13782STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_backward_impl_out, schema_str, "_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
13783
13784// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
13785static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell_backward_impl_out::schema> create__thnn_fused_lstm_cell_backward_impl_out_typed_handle() {
13786 return c10::Dispatcher::singleton()
13787 .findSchemaOrThrow(_thnn_fused_lstm_cell_backward_impl_out::name, _thnn_fused_lstm_cell_backward_impl_out::overload_name)
13788 .typed<_thnn_fused_lstm_cell_backward_impl_out::schema>();
13789}
13790
13791// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
13792::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out::call(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
13793
13794 static auto op = create__thnn_fused_lstm_cell_backward_impl_out_typed_handle();
13795 return op.call(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
13796}
13797
13798// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
13799::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out::redispatch(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
13800
13801 static auto op = create__thnn_fused_lstm_cell_backward_impl_out_typed_handle();
13802 return op.redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
13803}
13804
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_gru_cell_out, name, "aten::_thnn_fused_gru_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_gru_cell_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_gru_cell_out, schema_str, "_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_gru_cell_out::schema> create__thnn_fused_gru_cell_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_thnn_fused_gru_cell_out::name, _thnn_fused_gru_cell_out::overload_name)
      .typed<_thnn_fused_gru_cell_out::schema>();
}

// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out::call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__thnn_fused_gru_cell_out_typed_handle();
    return op.call(input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
}

// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__thnn_fused_gru_cell_out_typed_handle();
    return op.redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence_out, name, "aten::_pack_padded_sequence")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pack_padded_sequence_out, schema_str, "_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_pack_padded_sequence_out::schema> create__pack_padded_sequence_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pack_padded_sequence_out::name, _pack_padded_sequence_out::overload_name)
      .typed<_pack_padded_sequence_out::schema>();
}

// aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out::call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__pack_padded_sequence_out_typed_handle();
    return op.call(input, lengths, batch_first, out0, out1);
}

// aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create__pack_padded_sequence_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, lengths, batch_first, out0, out1);
}

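// Illustrative usage sketch for the out variant above (hand-written, not
// generated; shapes and values are hypothetical, and `lengths` is assumed to
// be a 1-D int64 tensor as the op requires):
//
//   at::Tensor input = at::rand({5, 3, 4});            // T x B x *
//   at::Tensor lengths = at::tensor({5, 4, 2}, at::kLong);
//   at::Tensor out0 = at::empty({0});
//   at::Tensor out1 = at::empty({0}, at::kLong);
//   _pack_padded_sequence_out::call(input, lengths, /*batch_first=*/false,
//                                   out0, out1);
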
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_out, name, "aten::_masked_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_masked_softmax_out, schema_str, "_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_masked_softmax_out::schema> create__masked_softmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_masked_softmax_out::name, _masked_softmax_out::overload_name)
      .typed<_masked_softmax_out::schema>();
}

// aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _masked_softmax_out::call(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, at::Tensor & out) {

    static auto op = create__masked_softmax_out_typed_handle();
    return op.call(self, mask, dim, mask_type, out);
}

// aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _masked_softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, at::Tensor & out) {

    static auto op = create__masked_softmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, dim, mask_type, out);
}

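// Optional scalar arguments in a schema (`int? dim=None`) travel through the
// C++ boundary as c10::optional<int64_t>; passing c10::nullopt selects the
// schema default. A minimal sketch (illustrative only; `self` and `mask` are
// assumed caller-provided tensors of matching shape):
//
//   at::Tensor out = at::empty_like(self);
//   _masked_softmax_out::call(self, mask, /*dim=*/c10::nullopt,
//                             /*mask_type=*/c10::nullopt, out);
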
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_Scalar_out, name, "aten::_foreach_mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_Scalar_out, schema_str, "_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_Scalar_out::schema> create__foreach_mul_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul_Scalar_out::name, _foreach_mul_Scalar_out::overload_name)
      .typed<_foreach_mul_Scalar_out::schema>();
}

// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_mul_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_mul_Scalar_out_typed_handle();
    return op.call(self, scalar, out);
}

// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_mul_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_mul_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar, out);
}

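// The _foreach *_out variants return void and write element-wise into `out`,
// which must already hold one allocated tensor per input. A sketch
// (illustrative only; `tensors` is a hypothetical std::vector<at::Tensor>,
// which converts implicitly to at::TensorList):
//
//   std::vector<at::Tensor> outs;
//   outs.reserve(tensors.size());
//   for (const auto& t : tensors) outs.push_back(at::empty_like(t));
//   _foreach_mul_Scalar_out::call(tensors, /*scalar=*/2.0, outs);
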
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_Scalar_out, name, "aten::_foreach_div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_Scalar_out, schema_str, "_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_Scalar_out::schema> create__foreach_div_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div_Scalar_out::name, _foreach_div_Scalar_out::overload_name)
      .typed<_foreach_div_Scalar_out::schema>();
}

// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_div_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_div_Scalar_out_typed_handle();
    return op.call(self, scalar, out);
}

// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_div_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_div_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_List_out, name, "aten::_foreach_mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_List_out, schema_str, "_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_List_out::schema> create__foreach_mul_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul_List_out::name, _foreach_mul_List_out::overload_name)
      .typed<_foreach_mul_List_out::schema>();
}

// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_mul_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_mul_List_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_mul_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_mul_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_List_out, name, "aten::_foreach_div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_List_out, schema_str, "_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_List_out::schema> create__foreach_div_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div_List_out::name, _foreach_div_List_out::overload_name)
      .typed<_foreach_div_List_out::schema>();
}

// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_div_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_div_List_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_div_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_div_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_ScalarList_out, name, "aten::_foreach_div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_div_ScalarList_out, schema_str, "_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_ScalarList_out::schema> create__foreach_div_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_div_ScalarList_out::name, _foreach_div_ScalarList_out::overload_name)
      .typed<_foreach_div_ScalarList_out::schema>();
}

// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_div_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_div_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_div_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_div_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_ScalarList_out, name, "aten::_foreach_mul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_mul_ScalarList_out, schema_str, "_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_ScalarList_out::schema> create__foreach_mul_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_mul_ScalarList_out::name, _foreach_mul_ScalarList_out::overload_name)
      .typed<_foreach_mul_ScalarList_out::schema>();
}

// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_mul_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_mul_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_mul_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_mul_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero_out, name, "aten::_foreach_zero")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero_out, schema_str, "_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_zero_out::schema> create__foreach_zero_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_zero_out::name, _foreach_zero_out::overload_name)
      .typed<_foreach_zero_out::schema>();
}

// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_zero_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_zero_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_zero_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_zero_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero, name, "aten::_foreach_zero")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_zero, schema_str, "_foreach_zero(Tensor[] self) -> Tensor[] self_out")

// aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_zero::schema> create__foreach_zero_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_zero::name, _foreach_zero::overload_name)
      .typed<_foreach_zero::schema>();
}

// aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
::std::vector<at::Tensor> _foreach_zero::call(at::TensorList self) {

    static auto op = create__foreach_zero_typed_handle();
    return op.call(self);
}

// aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
::std::vector<at::Tensor> _foreach_zero::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_zero_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

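// Unlike the .out variant above, the functional _foreach_zero returns freshly
// allocated zeroed tensors (the `self_out` named in its schema) and leaves its
// inputs untouched. A sketch (illustrative only; `tensors` is a hypothetical
// std::vector<at::Tensor>):
//
//   std::vector<at::Tensor> zeroed = _foreach_zero::call(tensors);
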
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin_out, name, "aten::_foreach_asin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_asin_out, schema_str, "_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_asin_out::schema> create__foreach_asin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_asin_out::name, _foreach_asin_out::overload_name)
      .typed<_foreach_asin_out::schema>();
}

// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_asin_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_asin_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_asin_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_asin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos_out, name, "aten::_foreach_cos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_cos_out, schema_str, "_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cos_out::schema> create__foreach_cos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cos_out::name, _foreach_cos_out::overload_name)
      .typed<_foreach_cos_out::schema>();
}

// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_cos_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_cos_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_cos_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_cos_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor_out, name, "aten::_foreach_floor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_floor_out, schema_str, "_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_floor_out::schema> create__foreach_floor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_floor_out::name, _foreach_floor_out::overload_name)
      .typed<_foreach_floor_out::schema>();
}

// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_floor_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_floor_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_floor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_floor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh_out, name, "aten::_foreach_tanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_tanh_out, schema_str, "_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tanh_out::schema> create__foreach_tanh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_tanh_out::name, _foreach_tanh_out::overload_name)
      .typed<_foreach_tanh_out::schema>();
}

// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_tanh_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_tanh_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_tanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_tanh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Scalar_out, name, "aten::_foreach_addcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Scalar_out, schema_str, "_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_Scalar_out::schema> create__foreach_addcmul_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_Scalar_out::name, _foreach_addcmul_Scalar_out::overload_name)
      .typed<_foreach_addcmul_Scalar_out::schema>();
}

// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_Scalar_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {

    static auto op = create__foreach_addcmul_Scalar_out_typed_handle();
    return op.call(self, tensor1, tensor2, value, out);
}

// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {

    static auto op = create__foreach_addcmul_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_ScalarList_out, name, "aten::_foreach_addcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_ScalarList_out, schema_str, "_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_ScalarList_out::schema> create__foreach_addcmul_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_ScalarList_out::name, _foreach_addcmul_ScalarList_out::overload_name)
      .typed<_foreach_addcmul_ScalarList_out::schema>();
}

// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_ScalarList_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_addcmul_ScalarList_out_typed_handle();
    return op.call(self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_addcmul_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Tensor_out, name, "aten::_foreach_addcmul")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_addcmul_Tensor_out, schema_str, "_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_Tensor_out::schema> create__foreach_addcmul_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_Tensor_out::name, _foreach_addcmul_Tensor_out::overload_name)
      .typed<_foreach_addcmul_Tensor_out::schema>();
}

// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_Tensor_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {

    static auto op = create__foreach_addcmul_Tensor_out_typed_handle();
    return op.call(self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {

    static auto op = create__foreach_addcmul_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_out, name, "aten::_adaptive_avg_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_out, schema_str, "_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool3d_out::schema> create__adaptive_avg_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool3d_out::name, _adaptive_avg_pool3d_out::overload_name)
      .typed<_adaptive_avg_pool3d_out::schema>();
}

// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {

    static auto op = create__adaptive_avg_pool3d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {

    static auto op = create__adaptive_avg_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

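// SymInt[] arguments arrive here as c10::SymIntArrayRef so symbolic shapes
// can flow through tracing; concrete sizes still work because c10::SymInt is
// implicitly constructible from int64_t. A sketch (illustrative only; `self`
// is assumed a caller-provided 4-D or 5-D tensor):
//
//   std::vector<c10::SymInt> output_size = {1, 1, 1};
//   at::Tensor out = at::empty({0});
//   _adaptive_avg_pool3d_out::call(self, output_size, out);
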
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_output_mask_out, name, "aten::_slow_conv2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_output_mask_out, overload_name, "output_mask_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_slow_conv2d_backward_output_mask_out, schema_str, "_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_backward_output_mask_out::schema> create__slow_conv2d_backward_output_mask_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_slow_conv2d_backward_output_mask_out::name, _slow_conv2d_backward_output_mask_out::overload_name)
      .typed<_slow_conv2d_backward_output_mask_out::schema>();
}

// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_output_mask_out::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__slow_conv2d_backward_output_mask_out_typed_handle();
    return op.call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
}

// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_output_mask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__slow_conv2d_backward_output_mask_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_depthwise3d_out, name, "aten::conv_depthwise3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_depthwise3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_depthwise3d_out, schema_str, "conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)")

// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<conv_depthwise3d_out::schema> create_conv_depthwise3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv_depthwise3d_out::name, conv_depthwise3d_out::overload_name)
      .typed<conv_depthwise3d_out::schema>();
}

// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & conv_depthwise3d_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {

    static auto op = create_conv_depthwise3d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & conv_depthwise3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {

    static auto op = create_conv_depthwise3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_dilated2d_out, name, "aten::slow_conv_dilated2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_dilated2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv_dilated2d_out, schema_str, "slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_dilated2d_out::schema> create_slow_conv_dilated2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv_dilated2d_out::name, slow_conv_dilated2d_out::overload_name)
      .typed<slow_conv_dilated2d_out::schema>();
}

// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_dilated2d_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {

    static auto op = create_slow_conv_dilated2d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_dilated2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {

    static auto op = create_slow_conv_dilated2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_filled_intlist_out, name, "aten::_test_optional_filled_intlist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_filled_intlist_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_optional_filled_intlist_out, schema_str, "_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_filled_intlist_out::schema> create__test_optional_filled_intlist_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_optional_filled_intlist_out::name, _test_optional_filled_intlist_out::overload_name)
      .typed<_test_optional_filled_intlist_out::schema>();
}

// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_filled_intlist_out::call(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {

    static auto op = create__test_optional_filled_intlist_out_typed_handle();
    return op.call(values, addends, out);
}

// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_filled_intlist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {

    static auto op = create__test_optional_filled_intlist_out_typed_handle();
    return op.redispatch(dispatchKeySet, values, addends, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view_copy_out, name, "aten::_test_autograd_multiple_dispatch_view_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_autograd_multiple_dispatch_view_copy_out, schema_str, "_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_view_copy_out::schema> create__test_autograd_multiple_dispatch_view_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_view_copy_out::name, _test_autograd_multiple_dispatch_view_copy_out::overload_name)
      .typed<_test_autograd_multiple_dispatch_view_copy_out::schema>();
}

// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_autograd_multiple_dispatch_view_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create__test_autograd_multiple_dispatch_view_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_autograd_multiple_dispatch_view_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create__test_autograd_multiple_dispatch_view_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal_copy_out, name, "aten::_fw_primal_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fw_primal_copy_out, schema_str, "_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_fw_primal_copy_out::schema> create__fw_primal_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fw_primal_copy_out::name, _fw_primal_copy_out::overload_name)
      .typed<_fw_primal_copy_out::schema>();
}

// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fw_primal_copy_out::call(const at::Tensor & self, int64_t level, at::Tensor & out) {

    static auto op = create__fw_primal_copy_out_typed_handle();
    return op.call(self, level, out);
}

// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fw_primal_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, at::Tensor & out) {

    static auto op = create__fw_primal_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, level, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real_copy_out, name, "aten::view_as_real_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as_real_copy_out, schema_str, "view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<view_as_real_copy_out::schema> create_view_as_real_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_real_copy_out::name, view_as_real_copy_out::overload_name)
      .typed<view_as_real_copy_out::schema>();
}

// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_as_real_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_view_as_real_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_as_real_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_view_as_real_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

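// The *_copy.out operators materialize what the corresponding view op would
// merely alias, writing the result into `out` instead of returning a view.
// A sketch for view_as_real_copy (illustrative only; assumes a complex input,
// which gains a trailing size-2 real/imaginary dimension):
//
//   at::Tensor c = at::rand({2, 2}, at::kComplexFloat);
//   at::Tensor out = at::empty({2, 2, 2});
//   view_as_real_copy_out::call(c, out);
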
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_copy_out, name, "aten::as_strided_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(as_strided_copy_out, schema_str, "as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_copy_out::schema> create_as_strided_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_copy_out::name, as_strided_copy_out::overload_name)
      .typed<as_strided_copy_out::schema>();
}

// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & as_strided_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {

    static auto op = create_as_strided_copy_out_typed_handle();
    return op.call(self, size, stride, storage_offset, out);
}

// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & as_strided_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {

    static auto op = create_as_strided_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, storage_offset, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias_copy_out, name, "aten::_reshape_alias_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_reshape_alias_copy_out, schema_str, "_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_alias_copy_out::schema> create__reshape_alias_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_reshape_alias_copy_out::name, _reshape_alias_copy_out::overload_name)
      .typed<_reshape_alias_copy_out::schema>();
}

// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _reshape_alias_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {

    static auto op = create__reshape_alias_copy_out_typed_handle();
    return op.call(self, size, stride, out);
}

// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _reshape_alias_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {

    static auto op = create__reshape_alias_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_out, name, "aten::squeeze_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_out, schema_str, "squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_out::schema> create_squeeze_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy_out::name, squeeze_copy_out::overload_name)
      .typed<squeeze_copy_out::schema>();
}

// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_squeeze_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_squeeze_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dim_out, name, "aten::squeeze_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dim_out, overload_name, "dim_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dim_out, schema_str, "squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_dim_out::schema> create_squeeze_copy_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy_dim_out::name, squeeze_copy_dim_out::overload_name)
      .typed<squeeze_copy_dim_out::schema>();
}

// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_dim_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {

    static auto op = create_squeeze_copy_dim_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {

    static auto op = create_squeeze_copy_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dims_out, name, "aten::squeeze_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dims_out, overload_name, "dims_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(squeeze_copy_dims_out, schema_str, "squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_dims_out::schema> create_squeeze_copy_dims_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy_dims_out::name, squeeze_copy_dims_out::overload_name)
      .typed<squeeze_copy_dims_out::schema>();
}

// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_dims_out::call(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {

    static auto op = create_squeeze_copy_dims_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_dims_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {

    static auto op = create_squeeze_copy_dims_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(indices_copy_out, name, "aten::indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(indices_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(indices_copy_out, schema_str, "indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<indices_copy_out::schema> create_indices_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(indices_copy_out::name, indices_copy_out::overload_name)
      .typed<indices_copy_out::schema>();
}

// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_indices_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_indices_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices_copy_out, name, "aten::ccol_indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ccol_indices_copy_out, schema_str, "ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ccol_indices_copy_out::schema> create_ccol_indices_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ccol_indices_copy_out::name, ccol_indices_copy_out::overload_name)
      .typed<ccol_indices_copy_out::schema>();
}

// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ccol_indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_ccol_indices_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ccol_indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_ccol_indices_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_decoder_only_layer_fwd_out, name, "aten::_transformer_decoder_only_layer_fwd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_decoder_only_layer_fwd_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_decoder_only_layer_fwd_out, schema_str, "_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_transformer_decoder_only_layer_fwd_out::schema> create__transformer_decoder_only_layer_fwd_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_transformer_decoder_only_layer_fwd_out::name, _transformer_decoder_only_layer_fwd_out::overload_name)
      .typed<_transformer_decoder_only_layer_fwd_out::schema>();
}

// aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_out::call(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__transformer_decoder_only_layer_fwd_out_typed_handle();
    return op.call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2);
}

// aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__transformer_decoder_only_layer_fwd_out_typed_handle();
    return op.redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2);
}

}} // namespace at::_ops