#include <ATen/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h>

// @generated by torchgen/gen.py from Operators.cpp
// NOTE See [Sharded File] comment in VariableType

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
#include <ATen/ops/_cast_Double.h>
#include <ATen/ops/_cast_Float.h>
#include <ATen/ops/_cast_Half.h>
#include <ATen/ops/_version.h>
#include <ATen/ops/_make_dual.h>
#include <ATen/ops/align_as.h>
#include <ATen/ops/_assert_tensor_metadata.h>
#include <ATen/ops/refine_names.h>
#include <ATen/ops/_use_cudnn_rnn_flatten_weight.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight.h>
#include <ATen/ops/_sobol_engine_ff.h>
#include <ATen/ops/_sobol_engine_scramble.h>
#include <ATen/ops/feature_alpha_dropout.h>
#include <ATen/ops/feature_alpha_dropout.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/imag.h>
#include <ATen/ops/resolve_conj.h>
#include <ATen/ops/resolve_neg.h>
#include <ATen/ops/adaptive_max_pool1d.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/affine_grid_generator_backward.h>
#include <ATen/ops/argmin.h>
#include <ATen/ops/argmin.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/quantized_batch_norm.h>
#include <ATen/ops/binary_cross_entropy_backward.h>
#include <ATen/ops/binary_cross_entropy_backward.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/logical_not.h>
#include <ATen/ops/logical_not.h>
#include <ATen/ops/logical_not.h>
#include <ATen/ops/concatenate.h>
#include <ATen/ops/concatenate.h>
#include <ATen/ops/concatenate.h>
#include <ATen/ops/concatenate.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/conv_tbc.h>
#include <ATen/ops/cosh.h>
#include <ATen/ops/cosh.h>
#include <ATen/ops/cosh.h>
#include <ATen/ops/cosine_embedding_loss.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward.h>
#include <ATen/ops/cudnn_grid_sampler.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/_cummin_helper.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/_embedding_bag_forward_only.h>
#include <ATen/ops/embedding_bag.h>
#include <ATen/ops/embedding_bag.h>
#include <ATen/ops/new_zeros.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/grid_sampler.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback.h>
#include <ATen/ops/grid_sampler_3d.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/native_group_norm_backward.h>
#include <ATen/ops/_fft_c2c.h>
#include <ATen/ops/_fft_c2c.h>
#include <ATen/ops/_validate_compressed_sparse_indices.h>
#include <ATen/ops/_cufft_get_plan_cache_size.h>
#include <ATen/ops/_cufft_get_plan_cache_max_size.h>
#include <ATen/ops/index.h>
#include <ATen/ops/index.h>
#include <ATen/ops/isnan.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/native_layer_norm.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h>
#include <ATen/ops/fbgemm_linear_int8_weight.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/_log_softmax_backward_data.h>
#include <ATen/ops/_log_softmax_backward_data.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/matrix_exp_backward.h>
#include <ATen/ops/amax.h>
#include <ATen/ops/amax.h>
#include <ATen/ops/_mps_max_pool2d.h>
#include <ATen/ops/mkldnn_max_pool2d.h>
#include <ATen/ops/quantized_max_pool2d.h>
#include <ATen/ops/amin.h>
#include <ATen/ops/amin.h>
#include <ATen/ops/_mps_convolution.h>
#include <ATen/ops/mkldnn_rnn_layer_backward.h>
#include <ATen/ops/miopen_depthwise_convolution.h>
#include <ATen/ops/native_batch_norm.h>
#include <ATen/ops/native_batch_norm.h>
#include <ATen/ops/batch_norm_stats.h>
#include <ATen/ops/batch_norm_gather_stats.h>
#include <ATen/ops/native_batch_norm_backward.h>
#include <ATen/ops/batch_norm_backward_reduce.h>
#include <ATen/ops/is_vulkan_available.h>
#include <ATen/ops/_nnpack_spatial_convolution.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/_cdist_forward.h>
#include <ATen/ops/cosine_similarity.h>
#include <ATen/ops/movedim.h>
#include <ATen/ops/movedim.h>
#include <ATen/ops/numpy_T.h>
#include <ATen/ops/mH.h>
#include <ATen/ops/rand_like.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/gelu.h>
#include <ATen/ops/gelu.h>
#include <ATen/ops/gelu.h>
#include <ATen/ops/hardshrink.h>
#include <ATen/ops/hardshrink.h>
#include <ATen/ops/select_backward.h>
#include <ATen/ops/mish.h>
#include <ATen/ops/mish.h>
#include <ATen/ops/mish.h>
#include <ATen/ops/sigmoid.h>
#include <ATen/ops/sigmoid.h>
#include <ATen/ops/sigmoid.h>
#include <ATen/ops/detach.h>
#include <ATen/ops/detach.h>
#include <ATen/ops/size.h>
#include <ATen/ops/size.h>
#include <ATen/ops/slice_scatter.h>
#include <ATen/ops/_softmax_backward_data.h>
#include <ATen/ops/_softmax_backward_data.h>
#include <ATen/ops/split_with_sizes.h>
#include <ATen/ops/hsplit.h>
#include <ATen/ops/hsplit.h>
#include <ATen/ops/stack.h>
#include <ATen/ops/stack.h>
#include <ATen/ops/_stack.h>
#include <ATen/ops/_stack.h>
#include <ATen/ops/square.h>
#include <ATen/ops/square.h>
#include <ATen/ops/square.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tensordot.h>
#include <ATen/ops/tensordot.h>
#include <ATen/ops/tile.h>
#include <ATen/ops/_mkldnn_transpose.h>
#include <ATen/ops/_mkldnn_transpose.h>
#include <ATen/ops/fliplr.h>
#include <ATen/ops/_nested_from_padded_and_nested_example.h>
#include <ATen/ops/fix.h>
#include <ATen/ops/fix.h>
#include <ATen/ops/fix.h>
#include <ATen/ops/unique_dim.h>
#include <ATen/ops/unique_consecutive.h>
#include <ATen/ops/vander.h>
#include <ATen/ops/view_as.h>
#include <ATen/ops/_dirichlet_grad.h>
#include <ATen/ops/frobenius_norm.h>
#include <ATen/ops/frobenius_norm.h>
#include <ATen/ops/clone.h>
#include <ATen/ops/positive.h>
#include <ATen/ops/resize_as.h>
#include <ATen/ops/resize_as_sparse.h>
#include <ATen/ops/sparse_sampled_addmm.h>
#include <ATen/ops/sparse_sampled_addmm.h>
#include <ATen/ops/sparse_csr_tensor.h>
#include <ATen/ops/sparse_csr_tensor.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
#include <ATen/ops/dense_dim.h>
#include <ATen/ops/_dimV.h>
#include <ATen/ops/coalesce.h>
#include <ATen/ops/_indices.h>
#include <ATen/ops/to_sparse_csc.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight.h>
#include <ATen/ops/quantize_per_channel.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/q_per_channel_zero_points.h>
#include <ATen/ops/fake_quantize_per_tensor_affine.h>
#include <ATen/ops/fake_quantize_per_tensor_affine.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine.h>
#include <ATen/ops/_autocast_to_full_precision.h>
#include <ATen/ops/to.h>
#include <ATen/ops/to.h>
#include <ATen/ops/to.h>
#include <ATen/ops/to.h>
#include <ATen/ops/combinations.h>
#include <ATen/ops/item.h>
#include <ATen/ops/_lstm_mps.h>
#include <ATen/ops/_thnn_fused_lstm_cell.h>
#include <ATen/ops/lstm.h>
#include <ATen/ops/lstm.h>
#include <ATen/ops/gru.h>
#include <ATen/ops/gru.h>
#include <ATen/ops/rnn_tanh.h>
#include <ATen/ops/rnn_tanh.h>
#include <ATen/ops/rnn_relu_cell.h>
#include <ATen/ops/_pad_packed_sequence.h>
#include <ATen/ops/lift_fresh_copy.h>
#include <ATen/ops/index_reduce.h>
#include <ATen/ops/index_reduce.h>
#include <ATen/ops/index_reduce.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/digamma.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/cauchy.h>
#include <ATen/ops/log_normal.h>
#include <ATen/ops/cross.h>
#include <ATen/ops/cross.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/_gather_sparse_backward.h>
#include <ATen/ops/linalg_vander.h>
#include <ATen/ops/swapaxes.h>
#include <ATen/ops/swapaxes.h>
#include <ATen/ops/cholesky_solve.h>
#include <ATen/ops/cholesky_solve.h>
#include <ATen/ops/qr.h>
#include <ATen/ops/qr.h>
#include <ATen/ops/digamma.h>
#include <ATen/ops/digamma.h>
#include <ATen/ops/polygamma.h>
#include <ATen/ops/polygamma.h>
#include <ATen/ops/polygamma.h>
#include <ATen/ops/histc.h>
#include <ATen/ops/histc.h>
#include <ATen/ops/_histogramdd_bin_edges.h>
#include <ATen/ops/_histogramdd_from_bin_tensors.h>
#include <ATen/ops/nextafter.h>
#include <ATen/ops/nextafter.h>
#include <ATen/ops/nextafter.h>
#include <ATen/ops/maximum.h>
#include <ATen/ops/maximum.h>
#include <ATen/ops/minimum.h>
#include <ATen/ops/minimum.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/msort.h>
#include <ATen/ops/msort.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/topk.h>
#include <ATen/ops/topk.h>
#include <ATen/ops/unfold_backward.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/alias.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_acos.h>
#include <ATen/ops/_foreach_acos.h>
#include <ATen/ops/_foreach_atan.h>
#include <ATen/ops/_foreach_atan.h>
#include <ATen/ops/_foreach_ceil.h>
#include <ATen/ops/_foreach_ceil.h>
#include <ATen/ops/_foreach_erf.h>
#include <ATen/ops/_foreach_erf.h>
#include <ATen/ops/_foreach_log2.h>
#include <ATen/ops/_foreach_log2.h>
#include <ATen/ops/bucketize.h>
#include <ATen/ops/bucketize.h>
#include <ATen/ops/bucketize.h>
#include <ATen/ops/mse_loss.h>
#include <ATen/ops/mse_loss.h>
#include <ATen/ops/l1_loss.h>
#include <ATen/ops/nll_loss_nd.h>
#include <ATen/ops/nll_loss2d.h>
#include <ATen/ops/nll_loss2d.h>
#include <ATen/ops/nll_loss2d_forward.h>
#include <ATen/ops/nll_loss2d_forward.h>
#include <ATen/ops/nll_loss2d_backward.h>
#include <ATen/ops/nll_loss2d_backward.h>
#include <ATen/ops/soft_margin_loss.h>
#include <ATen/ops/soft_margin_loss.h>
#include <ATen/ops/glu.h>
#include <ATen/ops/glu.h>
#include <ATen/ops/glu_backward_jvp.h>
#include <ATen/ops/hardtanh.h>
#include <ATen/ops/hardtanh.h>
#include <ATen/ops/hardtanh.h>
#include <ATen/ops/hardswish_backward.h>
#include <ATen/ops/leaky_relu.h>
#include <ATen/ops/leaky_relu.h>
#include <ATen/ops/leaky_relu.h>
#include <ATen/ops/log_sigmoid_forward.h>
#include <ATen/ops/log_sigmoid_forward.h>
#include <ATen/ops/log_sigmoid_backward.h>
#include <ATen/ops/log_sigmoid_backward.h>
#include <ATen/ops/softshrink.h>
#include <ATen/ops/softshrink.h>
#include <ATen/ops/adaptive_avg_pool3d_backward.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward.h>
#include <ATen/ops/adaptive_max_pool2d_backward.h>
#include <ATen/ops/adaptive_max_pool2d_backward.h>
#include <ATen/ops/adaptive_max_pool3d_backward.h>
#include <ATen/ops/adaptive_max_pool3d_backward.h>
#include <ATen/ops/fractional_max_pool3d.h>
#include <ATen/ops/fractional_max_pool3d.h>
#include <ATen/ops/reflection_pad3d.h>
#include <ATen/ops/reflection_pad3d.h>
#include <ATen/ops/replication_pad1d.h>
#include <ATen/ops/replication_pad1d.h>
#include <ATen/ops/replication_pad2d.h>
#include <ATen/ops/replication_pad2d.h>
#include <ATen/ops/_pad_circular.h>
#include <ATen/ops/pad.h>
#include <ATen/ops/upsample_nearest1d.h>
#include <ATen/ops/_upsample_nearest_exact1d.h>
#include <ATen/ops/upsample_nearest1d.h>
#include <ATen/ops/_upsample_nearest_exact1d.h>
#include <ATen/ops/upsample_nearest1d.h>
#include <ATen/ops/_upsample_nearest_exact1d.h>
#include <ATen/ops/_conv_depthwise2d.h>
#include <ATen/ops/_conv_depthwise2d.h>
#include <ATen/ops/slow_conv3d.h>
#include <ATen/ops/slow_conv3d.h>
#include <ATen/ops/_remove_batch_dim.h>
#include <ATen/ops/special_log_ndtr.h>
#include <ATen/ops/special_log_ndtr.h>
#include <ATen/ops/special_erf.h>
#include <ATen/ops/special_erf.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_expit.h>
#include <ATen/ops/special_expit.h>
#include <ATen/ops/special_sinc.h>
#include <ATen/ops/special_sinc.h>
#include <ATen/ops/special_softmax.h>
#include <ATen/ops/fft_fft.h>
#include <ATen/ops/fft_fft.h>
#include <ATen/ops/fft_rfft.h>
#include <ATen/ops/fft_rfft.h>
#include <ATen/ops/fft_hfft2.h>
#include <ATen/ops/fft_hfft2.h>
#include <ATen/ops/fft_ifftn.h>
#include <ATen/ops/fft_ifftn.h>
#include <ATen/ops/fft_ihfftn.h>
#include <ATen/ops/fft_ihfftn.h>
#include <ATen/ops/fft_fftfreq.h>
#include <ATen/ops/fft_fftfreq.h>
#include <ATen/ops/fft_rfftfreq.h>
#include <ATen/ops/fft_rfftfreq.h>
#include <ATen/ops/linalg_cholesky_ex.h>
#include <ATen/ops/linalg_cholesky_ex.h>
#include <ATen/ops/linalg_cross.h>
#include <ATen/ops/linalg_cross.h>
#include <ATen/ops/linalg_lu_factor_ex.h>
#include <ATen/ops/linalg_lu_factor_ex.h>
#include <ATen/ops/det.h>
#include <ATen/ops/inverse.h>
#include <ATen/ops/inverse.h>
#include <ATen/ops/linalg_cond.h>
#include <ATen/ops/linalg_cond.h>
#include <ATen/ops/linalg_cond.h>
#include <ATen/ops/linalg_cond.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_solve_ex.h>
#include <ATen/ops/linalg_solve_ex.h>
#include <ATen/ops/linalg_tensorsolve.h>
#include <ATen/ops/linalg_tensorsolve.h>
#include <ATen/ops/linalg_multi_dot.h>
#include <ATen/ops/linalg_multi_dot.h>
#include <ATen/ops/_test_string_default.h>
#include <ATen/ops/flatten_dense_tensors.h>
#include <ATen/ops/_conj_copy.h>
#include <ATen/ops/detach_copy.h>
#include <ATen/ops/row_indices_copy.h>
#include <ATen/ops/_transformer_encoder_layer_fwd.h>
#include <ATen/ops/_native_multi_head_attention.h>
#include <ATen/ops/_scaled_dot_product_attention.h>
#include <ATen/ops/_fused_sdp_choice.h>
#include <ATen/ops/_scaled_dot_product_flash_attention.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward.h>
#include <ATen/ops/_flash_attention_backward.h>
#include <ATen/ops/_efficient_attention_backward.h>
#include <ATen/ops/_triton_multi_head_attention.h>
#include <ATen/ops/special_airy_ai.h>
#include <ATen/ops/special_airy_ai.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_modified_bessel_i0.h>
#include <ATen/ops/special_modified_bessel_i0.h>
#include <ATen/ops/special_modified_bessel_k0.h>
#include <ATen/ops/special_modified_bessel_k0.h>
#include <ATen/ops/special_modified_bessel_k1.h>
#include <ATen/ops/special_modified_bessel_k1.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight.h>
#include <ATen/ops/quantized_batch_norm.h>
#include <ATen/ops/conv_tbc.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward.h>
#include <ATen/ops/cudnn_grid_sampler.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/_embedding_bag_forward_only.h>
#include <ATen/ops/new_zeros.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback.h>
#include <ATen/ops/grid_sampler_3d.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/native_group_norm_backward.h>
#include <ATen/ops/isnan.h>
#include <ATen/ops/native_layer_norm.h>
#include <ATen/ops/_mps_max_pool2d.h>
#include <ATen/ops/mkldnn_max_pool2d.h>
#include <ATen/ops/quantized_max_pool2d.h>
#include <ATen/ops/_mps_convolution.h>
#include <ATen/ops/mkldnn_rnn_layer_backward.h>
#include <ATen/ops/miopen_depthwise_convolution.h>
#include <ATen/ops/batch_norm_stats.h>
#include <ATen/ops/batch_norm_gather_stats.h>
#include <ATen/ops/native_batch_norm_backward.h>
#include <ATen/ops/batch_norm_backward_reduce.h>
#include <ATen/ops/_nnpack_spatial_convolution.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/_cdist_forward.h>
#include <ATen/ops/rand_like.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/select_backward.h>
#include <ATen/ops/slice_scatter.h>
#include <ATen/ops/_mkldnn_transpose.h>
#include <ATen/ops/_nested_from_padded_and_nested_example.h>
#include <ATen/ops/unique_dim.h>
#include <ATen/ops/unique_consecutive.h>
#include <ATen/ops/_dirichlet_grad.h>
#include <ATen/ops/clone.h>
#include <ATen/ops/resize_as.h>
#include <ATen/ops/resize_as.h>
#include <ATen/ops/resize_as_sparse.h>
#include <ATen/ops/resize_as_sparse.h>
#include <ATen/ops/to_sparse_csc.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight.h>
#include <ATen/ops/quantize_per_channel.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/q_per_channel_zero_points.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine.h>
#include <ATen/ops/_lstm_mps.h>
#include <ATen/ops/_thnn_fused_lstm_cell.h>
#include <ATen/ops/lift_fresh_copy.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/cauchy.h>
#include <ATen/ops/cauchy.h>
#include <ATen/ops/log_normal.h>
#include <ATen/ops/log_normal.h>
#include <ATen/ops/_histogramdd_bin_edges.h>
#include <ATen/ops/_histogramdd_from_bin_tensors.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/unfold_backward.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_acos.h>
#include <ATen/ops/_foreach_atan.h>
#include <ATen/ops/_foreach_ceil.h>
#include <ATen/ops/_foreach_erf.h>
#include <ATen/ops/_foreach_log2.h>
#include <ATen/ops/bucketize.h>
#include <ATen/ops/glu_backward_jvp.h>
#include <ATen/ops/hardswish_backward.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward.h>
#include <ATen/ops/_conj_copy.h>
#include <ATen/ops/detach_copy.h>
#include <ATen/ops/row_indices_copy.h>
#include <ATen/ops/_transformer_encoder_layer_fwd.h>
#include <ATen/ops/_native_multi_head_attention.h>
#include <ATen/ops/_triton_multi_head_attention.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_fused_adamw.h>
#endif



namespace at { namespace _ops {

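// Every operator in this shard follows the same generated pattern: three
// out-of-line string constants (name, overload_name, schema_str), a
// C10_NOINLINE factory that resolves the schema against the dispatcher
// singleton, and two entry points -- call(), which starts a fresh dispatch,
// and redispatch(), which re-enters the dispatcher with an explicit
// c10::DispatchKeySet. The typed handle returned by the factory is cached in
// a function-local static, so findSchemaOrThrow runs once per operator per
// process and later invocations reuse the handle.
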
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Double, name, "aten::_cast_Double")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Double, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Double, schema_str, "_cast_Double(Tensor self, bool non_blocking=False) -> Tensor")

// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Double::schema> create__cast_Double_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cast_Double::name, _cast_Double::overload_name)
      .typed<_cast_Double::schema>();
}

// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Double::call(const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Double_typed_handle();
    return op.call(self, non_blocking);
}

// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Double::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Double_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking);
}
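
// Illustrative usage (not part of the generated file): the _ops entry point
// is what the public at:: wrapper forwards to, so the two calls below are
// equivalent. The tensor variables here are hypothetical.
//
//   at::Tensor t = at::ones({2, 3});
//   at::Tensor a = at::_cast_Double(t);
//   at::Tensor b = at::_ops::_cast_Double::call(t, /*non_blocking=*/false);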

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Float, name, "aten::_cast_Float")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Float, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Float, schema_str, "_cast_Float(Tensor self, bool non_blocking=False) -> Tensor")

// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Float::schema> create__cast_Float_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cast_Float::name, _cast_Float::overload_name)
      .typed<_cast_Float::schema>();
}

// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Float::call(const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Float_typed_handle();
    return op.call(self, non_blocking);
}

// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Float::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Float_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Half, name, "aten::_cast_Half")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Half, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cast_Half, schema_str, "_cast_Half(Tensor self, bool non_blocking=False) -> Tensor")

// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Half::schema> create__cast_Half_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cast_Half::name, _cast_Half::overload_name)
      .typed<_cast_Half::schema>();
}

// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Half::call(const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Half_typed_handle();
    return op.call(self, non_blocking);
}

// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Half::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
    static auto op = create__cast_Half_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_version, name, "aten::_version")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_version, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_version, schema_str, "_version(Tensor self) -> int")

// aten::_version(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_version::schema> create__version_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_version::name, _version::overload_name)
      .typed<_version::schema>();
}

// aten::_version(Tensor self) -> int
int64_t _version::call(const at::Tensor & self) {
    static auto op = create__version_typed_handle();
    return op.call(self);
}

// aten::_version(Tensor self) -> int
int64_t _version::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create__version_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual, name, "aten::_make_dual")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_make_dual, schema_str, "_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)")

// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_make_dual::schema> create__make_dual_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_dual::name, _make_dual::overload_name)
      .typed<_make_dual::schema>();
}

// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
at::Tensor _make_dual::call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    static auto op = create__make_dual_typed_handle();
    return op.call(primal, tangent, level);
}

// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
at::Tensor _make_dual::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    static auto op = create__make_dual_typed_handle();
    return op.redispatch(dispatchKeySet, primal, tangent, level);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_as, name, "aten::align_as")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_as, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(align_as, schema_str, "align_as(Tensor self, Tensor other) -> Tensor")

// aten::align_as(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<align_as::schema> create_align_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(align_as::name, align_as::overload_name)
      .typed<align_as::schema>();
}

// aten::align_as(Tensor self, Tensor other) -> Tensor
at::Tensor align_as::call(const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_align_as_typed_handle();
    return op.call(self, other);
}

// aten::align_as(Tensor self, Tensor other) -> Tensor
at::Tensor align_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto op = create_align_as_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_assert_tensor_metadata, name, "aten::_assert_tensor_metadata")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_assert_tensor_metadata, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_assert_tensor_metadata, schema_str, "_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()")

// aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_assert_tensor_metadata::schema> create__assert_tensor_metadata_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_assert_tensor_metadata::name, _assert_tensor_metadata::overload_name)
      .typed<_assert_tensor_metadata::schema>();
}

// aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()
void _assert_tensor_metadata::call(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
    static auto op = create__assert_tensor_metadata_typed_handle();
    return op.call(a, size, stride, dtype);
}

// aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()
void _assert_tensor_metadata::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
    static auto op = create__assert_tensor_metadata_typed_handle();
    return op.redispatch(dispatchKeySet, a, size, stride, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(refine_names, name, "aten::refine_names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(refine_names, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(refine_names, schema_str, "refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)")

// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<refine_names::schema> create_refine_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(refine_names::name, refine_names::overload_name)
      .typed<refine_names::schema>();
}

// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
at::Tensor refine_names::call(const at::Tensor & self, at::DimnameList names) {
    static auto op = create_refine_names_typed_handle();
    return op.call(self, names);
}

// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
at::Tensor refine_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) {
    static auto op = create_refine_names_typed_handle();
    return op.redispatch(dispatchKeySet, self, names);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_rnn_flatten_weight, name, "aten::_use_cudnn_rnn_flatten_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_rnn_flatten_weight, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_use_cudnn_rnn_flatten_weight, schema_str, "_use_cudnn_rnn_flatten_weight() -> bool")

// aten::_use_cudnn_rnn_flatten_weight() -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_use_cudnn_rnn_flatten_weight::schema> create__use_cudnn_rnn_flatten_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_use_cudnn_rnn_flatten_weight::name, _use_cudnn_rnn_flatten_weight::overload_name)
      .typed<_use_cudnn_rnn_flatten_weight::schema>();
}

// aten::_use_cudnn_rnn_flatten_weight() -> bool
bool _use_cudnn_rnn_flatten_weight::call() {
    static auto op = create__use_cudnn_rnn_flatten_weight_typed_handle();
    return op.call();
}

// aten::_use_cudnn_rnn_flatten_weight() -> bool
bool _use_cudnn_rnn_flatten_weight::redispatch(c10::DispatchKeySet dispatchKeySet) {
    static auto op = create__use_cudnn_rnn_flatten_weight_typed_handle();
    return op.redispatch(dispatchKeySet);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_flatten_weight, name, "aten::_cudnn_rnn_flatten_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_flatten_weight, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_flatten_weight, schema_str, "_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor")

// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_flatten_weight::schema> create__cudnn_rnn_flatten_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn_flatten_weight::name, _cudnn_rnn_flatten_weight::overload_name)
      .typed<_cudnn_rnn_flatten_weight::schema>();
}

// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
at::Tensor _cudnn_rnn_flatten_weight::call(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    static auto op = create__cudnn_rnn_flatten_weight_typed_handle();
    return op.call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
}

// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
at::Tensor _cudnn_rnn_flatten_weight::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    static auto op = create__cudnn_rnn_flatten_weight_typed_handle();
    return op.redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
}
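
// Note how schema types map to C++ in the signatures in this file: SymInt
// lowers to c10::SymInt, Tensor[] to at::TensorList, int to int64_t, float to
// double, and int[]? to at::OptionalIntArrayRef. Mutable Tensor(a!) arguments,
// like the self of _sobol_engine_ff_ below, are passed and returned as
// at::Tensor &.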

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_ff_, name, "aten::_sobol_engine_ff_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_ff_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_ff_, schema_str, "_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)")

// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sobol_engine_ff_::schema> create__sobol_engine_ff__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sobol_engine_ff_::name, _sobol_engine_ff_::overload_name)
      .typed<_sobol_engine_ff_::schema>();
}

// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
at::Tensor & _sobol_engine_ff_::call(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
    static auto op = create__sobol_engine_ff__typed_handle();
    return op.call(self, n, sobolstate, dimension, num_generated);
}

// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
at::Tensor & _sobol_engine_ff_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
    static auto op = create__sobol_engine_ff__typed_handle();
    return op.redispatch(dispatchKeySet, self, n, sobolstate, dimension, num_generated);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_scramble_, name, "aten::_sobol_engine_scramble_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_scramble_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sobol_engine_scramble_, schema_str, "_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)")

// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sobol_engine_scramble_::schema> create__sobol_engine_scramble__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sobol_engine_scramble_::name, _sobol_engine_scramble_::overload_name)
      .typed<_sobol_engine_scramble_::schema>();
}

// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
at::Tensor & _sobol_engine_scramble_::call(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
    static auto op = create__sobol_engine_scramble__typed_handle();
    return op.call(self, ltm, dimension);
}

// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
at::Tensor & _sobol_engine_scramble_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
    static auto op = create__sobol_engine_scramble__typed_handle();
    return op.redispatch(dispatchKeySet, self, ltm, dimension);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_alpha_dropout, name, "aten::feature_alpha_dropout")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_alpha_dropout, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_alpha_dropout, schema_str, "feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor")

// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<feature_alpha_dropout::schema> create_feature_alpha_dropout_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(feature_alpha_dropout::name, feature_alpha_dropout::overload_name)
      .typed<feature_alpha_dropout::schema>();
}

// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor feature_alpha_dropout::call(const at::Tensor & input, double p, bool train) {
    static auto op = create_feature_alpha_dropout_typed_handle();
    return op.call(input, p, train);
}

// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor feature_alpha_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) {
    static auto op = create_feature_alpha_dropout_typed_handle();
    return op.redispatch(dispatchKeySet, input, p, train);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_alpha_dropout_, name, "aten::feature_alpha_dropout_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_alpha_dropout_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(feature_alpha_dropout_, schema_str, "feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)")

// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<feature_alpha_dropout_::schema> create_feature_alpha_dropout__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(feature_alpha_dropout_::name, feature_alpha_dropout_::overload_name)
      .typed<feature_alpha_dropout_::schema>();
}

// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & feature_alpha_dropout_::call(at::Tensor & self, double p, bool train) {
    static auto op = create_feature_alpha_dropout__typed_handle();
    return op.call(self, p, train);
}

// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & feature_alpha_dropout_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) {
    static auto op = create_feature_alpha_dropout__typed_handle();
    return op.redispatch(dispatchKeySet, self, p, train);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs, name, "aten::abs")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs, schema_str, "abs(Tensor self) -> Tensor")

// aten::abs(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<abs::schema> create_abs_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(abs::name, abs::overload_name)
      .typed<abs::schema>();
}

// aten::abs(Tensor self) -> Tensor
at::Tensor abs::call(const at::Tensor & self) {
    static auto op = create_abs_typed_handle();
    return op.call(self);
}

// aten::abs(Tensor self) -> Tensor
at::Tensor abs::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_abs_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs_, name, "aten::abs_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs_, schema_str, "abs_(Tensor(a!) self) -> Tensor(a!)")

// aten::abs_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<abs_::schema> create_abs__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(abs_::name, abs_::overload_name)
      .typed<abs_::schema>();
}

// aten::abs_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & abs_::call(at::Tensor & self) {
    static auto op = create_abs__typed_handle();
    return op.call(self);
}

// aten::abs_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & abs_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto op = create_abs__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs_out, name, "aten::abs")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(abs_out, schema_str, "abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<abs_out::schema> create_abs_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(abs_out::name, abs_out::overload_name)
      .typed<abs_out::schema>();
}

// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & abs_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto op = create_abs_out_typed_handle();
    return op.call(self, out);
}

// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & abs_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto op = create_abs_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}
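
// The abs entries above illustrate the three schema flavors a typical op
// gets: functional (aten::abs), in-place (aten::abs_, with a mutable
// Tensor(a!) self), and out= (aten::abs.out, overload_name "out").
// Illustrative usage only; the tensor variables are hypothetical:
//
//   at::Tensor x = at::randn({4});
//   at::Tensor y = at::_ops::abs::call(x);   // functional
//   at::_ops::abs_::call(x);                 // in-place, mutates x
//   at::Tensor out = at::empty_like(x);
//   at::_ops::abs_out::call(x, out);         // writes into out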
1048
1049STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(imag, name, "aten::imag")
1050STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(imag, overload_name, "")
1051STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(imag, schema_str, "imag(Tensor(a) self) -> Tensor(a)")
1052
1053// aten::imag(Tensor(a) self) -> Tensor(a)
1054static C10_NOINLINE c10::TypedOperatorHandle<imag::schema> create_imag_typed_handle() {
1055 return c10::Dispatcher::singleton()
1056 .findSchemaOrThrow(imag::name, imag::overload_name)
1057 .typed<imag::schema>();
1058}
1059
1060// aten::imag(Tensor(a) self) -> Tensor(a)
1061at::Tensor imag::call(const at::Tensor & self) {
1062
1063 static auto op = create_imag_typed_handle();
1064 return op.call(self);
1065}
1066
1067// aten::imag(Tensor(a) self) -> Tensor(a)
1068at::Tensor imag::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
1069
1070 static auto op = create_imag_typed_handle();
1071 return op.redispatch(dispatchKeySet, self);
1072}
1073
1074STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resolve_conj, name, "aten::resolve_conj")
1075STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resolve_conj, overload_name, "")
1076STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resolve_conj, schema_str, "resolve_conj(Tensor(a) self) -> Tensor(a)")
1077
1078// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
1079static C10_NOINLINE c10::TypedOperatorHandle<resolve_conj::schema> create_resolve_conj_typed_handle() {
1080 return c10::Dispatcher::singleton()
1081 .findSchemaOrThrow(resolve_conj::name, resolve_conj::overload_name)
1082 .typed<resolve_conj::schema>();
1083}
1084
1085// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
1086at::Tensor resolve_conj::call(const at::Tensor & self) {
1087
1088 static auto op = create_resolve_conj_typed_handle();
1089 return op.call(self);
1090}
1091
1092// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
1093at::Tensor resolve_conj::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
1094
1095 static auto op = create_resolve_conj_typed_handle();
1096 return op.redispatch(dispatchKeySet, self);
1097}
1098
1099STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resolve_neg, name, "aten::resolve_neg")
1100STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resolve_neg, overload_name, "")
1101STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resolve_neg, schema_str, "resolve_neg(Tensor(a) self) -> Tensor(a)")
1102
1103// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
1104static C10_NOINLINE c10::TypedOperatorHandle<resolve_neg::schema> create_resolve_neg_typed_handle() {
1105 return c10::Dispatcher::singleton()
1106 .findSchemaOrThrow(resolve_neg::name, resolve_neg::overload_name)
1107 .typed<resolve_neg::schema>();
1108}
1109
1110// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
1111at::Tensor resolve_neg::call(const at::Tensor & self) {
1112
1113 static auto op = create_resolve_neg_typed_handle();
1114 return op.call(self);
1115}
1116
1117// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
1118at::Tensor resolve_neg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
1119
1120 static auto op = create_resolve_neg_typed_handle();
1121 return op.redispatch(dispatchKeySet, self);
1122}
1123
1124STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool1d, name, "aten::adaptive_max_pool1d")
1125STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool1d, overload_name, "")
1126STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool1d, schema_str, "adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)")
1127
1128// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
1129static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool1d::schema> create_adaptive_max_pool1d_typed_handle() {
1130 return c10::Dispatcher::singleton()
1131 .findSchemaOrThrow(adaptive_max_pool1d::name, adaptive_max_pool1d::overload_name)
1132 .typed<adaptive_max_pool1d::schema>();
1133}
1134
1135// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
1136::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d::call(const at::Tensor & self, at::IntArrayRef output_size) {
1137
1138 static auto op = create_adaptive_max_pool1d_typed_handle();
1139 return op.call(self, output_size);
1140}
1141
1142// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
1143::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
1144
1145 static auto op = create_adaptive_max_pool1d_typed_handle();
1146 return op.redispatch(dispatchKeySet, self, output_size);
1147}
1148
1149STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv, name, "aten::addmv")
1150STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv, overload_name, "")
1151STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv, schema_str, "addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor")
1152
1153// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
1154static C10_NOINLINE c10::TypedOperatorHandle<addmv::schema> create_addmv_typed_handle() {
1155 return c10::Dispatcher::singleton()
1156 .findSchemaOrThrow(addmv::name, addmv::overload_name)
1157 .typed<addmv::schema>();
1158}
1159
1160// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
1161at::Tensor addmv::call(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1162
1163 static auto op = create_addmv_typed_handle();
1164 return op.call(self, mat, vec, beta, alpha);
1165}
1166
1167// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
1168at::Tensor addmv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1169
1170 static auto op = create_addmv_typed_handle();
1171 return op.redispatch(dispatchKeySet, self, mat, vec, beta, alpha);
1172}
1173
1174STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv_, name, "aten::addmv_")
1175STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv_, overload_name, "")
1176STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv_, schema_str, "addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)")
1177
1178// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
1179static C10_NOINLINE c10::TypedOperatorHandle<addmv_::schema> create_addmv__typed_handle() {
1180 return c10::Dispatcher::singleton()
1181 .findSchemaOrThrow(addmv_::name, addmv_::overload_name)
1182 .typed<addmv_::schema>();
1183}
1184
1185// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
1186at::Tensor & addmv_::call(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1187
1188 static auto op = create_addmv__typed_handle();
1189 return op.call(self, mat, vec, beta, alpha);
1190}
1191
1192// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
1193at::Tensor & addmv_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1194
1195 static auto op = create_addmv__typed_handle();
1196 return op.redispatch(dispatchKeySet, self, mat, vec, beta, alpha);
1197}
1198
1199STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv_out, name, "aten::addmv")
1200STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv_out, overload_name, "out")
1201STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addmv_out, schema_str, "addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")
1202
1203// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
1204static C10_NOINLINE c10::TypedOperatorHandle<addmv_out::schema> create_addmv_out_typed_handle() {
1205 return c10::Dispatcher::singleton()
1206 .findSchemaOrThrow(addmv_out::name, addmv_out::overload_name)
1207 .typed<addmv_out::schema>();
1208}
1209
1210// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
1211at::Tensor & addmv_out::call(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
1212
1213 static auto op = create_addmv_out_typed_handle();
1214 return op.call(self, mat, vec, beta, alpha, out);
1215}
1216
1217// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
1218at::Tensor & addmv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
1219
1220 static auto op = create_addmv_out_typed_handle();
1221 return op.redispatch(dispatchKeySet, self, mat, vec, beta, alpha, out);
1222}
1223
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr, name, "aten::addr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr, schema_str, "addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")

// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<addr::schema> create_addr_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(addr::name, addr::overload_name)
        .typed<addr::schema>();
}

// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addr::call(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_addr_typed_handle();
    return op.call(self, vec1, vec2, beta, alpha);
}

// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_addr_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr_, name, "aten::addr_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr_, schema_str, "addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)")

// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addr_::schema> create_addr__typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(addr_::name, addr_::overload_name)
        .typed<addr_::schema>();
}

// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addr_::call(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_addr__typed_handle();
    return op.call(self, vec1, vec2, beta, alpha);
}

// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addr_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {

    static auto op = create_addr__typed_handle();
    return op.redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr_out, name, "aten::addr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(addr_out, schema_str, "addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addr_out::schema> create_addr_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(addr_out::name, addr_out::overload_name)
        .typed<addr_out::schema>();
}

// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addr_out::call(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_addr_out_typed_handle();
    return op.call(self, vec1, vec2, beta, alpha, out);
}

// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {

    static auto op = create_addr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator_backward, name, "aten::affine_grid_generator_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(affine_grid_generator_backward, schema_str, "affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor")

// aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<affine_grid_generator_backward::schema> create_affine_grid_generator_backward_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(affine_grid_generator_backward::name, affine_grid_generator_backward::overload_name)
        .typed<affine_grid_generator_backward::schema>();
}

// aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
at::Tensor affine_grid_generator_backward::call(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {

    static auto op = create_affine_grid_generator_backward_typed_handle();
    return op.call(grad, size, align_corners);
}

// aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
at::Tensor affine_grid_generator_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {

    static auto op = create_affine_grid_generator_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, size, align_corners);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmin, name, "aten::argmin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmin, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmin, schema_str, "argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor")

// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<argmin::schema> create_argmin_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(argmin::name, argmin::overload_name)
        .typed<argmin::schema>();
}

// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
at::Tensor argmin::call(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {

    static auto op = create_argmin_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
at::Tensor argmin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {

    static auto op = create_argmin_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmin_out, name, "aten::argmin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmin_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argmin_out, schema_str, "argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<argmin_out::schema> create_argmin_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(argmin_out::name, argmin_out::overload_name)
        .typed<argmin_out::schema>();
}

// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argmin_out::call(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {

    static auto op = create_argmin_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argmin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {

    static auto op = create_argmin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

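// NOTE [illustrative usage sketch, not generated code]: the schema type
// `int? dim` becomes c10::optional<int64_t> in C++; passing nothing (nullopt)
// reduces over the flattened input:
//
//   at::Tensor t    = at::rand({2, 3});
//   at::Tensor flat = at::argmin(t);             // dim=nullopt: index into flattened t
//   at::Tensor rows = at::argmin(t, /*dim=*/1);  // per-row argmin, result shape {2}
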
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan, name, "aten::atan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan, schema_str, "atan(Tensor self) -> Tensor")

// aten::atan(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atan::schema> create_atan_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(atan::name, atan::overload_name)
        .typed<atan::schema>();
}

// aten::atan(Tensor self) -> Tensor
at::Tensor atan::call(const at::Tensor & self) {

    static auto op = create_atan_typed_handle();
    return op.call(self);
}

// aten::atan(Tensor self) -> Tensor
at::Tensor atan::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_atan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan_, name, "aten::atan_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan_, schema_str, "atan_(Tensor(a!) self) -> Tensor(a!)")

// aten::atan_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atan_::schema> create_atan__typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(atan_::name, atan_::overload_name)
        .typed<atan_::schema>();
}

// aten::atan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & atan_::call(at::Tensor & self) {

    static auto op = create_atan__typed_handle();
    return op.call(self);
}

// aten::atan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & atan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_atan__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan_out, name, "aten::atan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(atan_out, schema_str, "atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atan_out::schema> create_atan_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(atan_out::name, atan_out::overload_name)
        .typed<atan_out::schema>();
}

// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atan_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_atan_out_typed_handle();
    return op.call(self, out);
}

// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atan_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_atan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan, name, "aten::arctan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan, schema_str, "arctan(Tensor self) -> Tensor")

// aten::arctan(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arctan::schema> create_arctan_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(arctan::name, arctan::overload_name)
        .typed<arctan::schema>();
}

// aten::arctan(Tensor self) -> Tensor
at::Tensor arctan::call(const at::Tensor & self) {

    static auto op = create_arctan_typed_handle();
    return op.call(self);
}

// aten::arctan(Tensor self) -> Tensor
at::Tensor arctan::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_arctan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan_, name, "aten::arctan_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan_, schema_str, "arctan_(Tensor(a!) self) -> Tensor(a!)")

// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctan_::schema> create_arctan__typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(arctan_::name, arctan_::overload_name)
        .typed<arctan_::schema>();
}

// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arctan_::call(at::Tensor & self) {

    static auto op = create_arctan__typed_handle();
    return op.call(self);
}

// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arctan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_arctan__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan_out, name, "aten::arctan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(arctan_out, schema_str, "arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctan_out::schema> create_arctan_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(arctan_out::name, arctan_out::overload_name)
        .typed<arctan_out::schema>();
}

// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctan_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_arctan_out_typed_handle();
    return op.call(self, out);
}

// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctan_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_arctan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

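// NOTE [illustrative usage sketch, not generated code]: the same handles can
// be reached without the generated at:: wrappers by querying the dispatcher
// directly; the (name, overload_name) pair is the lookup key used above:
//
//   auto op = c10::Dispatcher::singleton()
//       .findSchemaOrThrow("aten::arctan", "")
//       .typed<at::Tensor (const at::Tensor &)>();
//   at::Tensor y = op.call(at::rand({4}));   // equivalent to at::arctan(...)
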
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_batch_norm, name, "aten::quantized_batch_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_batch_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_batch_norm, schema_str, "quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor")

// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_batch_norm::schema> create_quantized_batch_norm_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(quantized_batch_norm::name, quantized_batch_norm::overload_name)
        .typed<quantized_batch_norm::schema>();
}

// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
at::Tensor quantized_batch_norm::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {

    static auto op = create_quantized_batch_norm_typed_handle();
    return op.call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
}

// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
at::Tensor quantized_batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {

    static auto op = create_quantized_batch_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_backward, name, "aten::binary_cross_entropy_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_backward, schema_str, "binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor")

// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_backward::schema> create_binary_cross_entropy_backward_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(binary_cross_entropy_backward::name, binary_cross_entropy_backward::overload_name)
        .typed<binary_cross_entropy_backward::schema>();
}

// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {

    static auto op = create_binary_cross_entropy_backward_typed_handle();
    return op.call(grad_output, self, target, weight, reduction);
}

// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {

    static auto op = create_binary_cross_entropy_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_backward_grad_input, name, "aten::binary_cross_entropy_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(binary_cross_entropy_backward_grad_input, schema_str, "binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_backward_grad_input::schema> create_binary_cross_entropy_backward_grad_input_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(binary_cross_entropy_backward_grad_input::name, binary_cross_entropy_backward_grad_input::overload_name)
        .typed<binary_cross_entropy_backward_grad_input::schema>();
}

// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & binary_cross_entropy_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {

    static auto op = create_binary_cross_entropy_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, grad_input);
}

// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & binary_cross_entropy_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {

    static auto op = create_binary_cross_entropy_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not, name, "aten::bitwise_not")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not, schema_str, "bitwise_not(Tensor self) -> Tensor")

// aten::bitwise_not(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_not::schema> create_bitwise_not_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(bitwise_not::name, bitwise_not::overload_name)
        .typed<bitwise_not::schema>();
}

// aten::bitwise_not(Tensor self) -> Tensor
at::Tensor bitwise_not::call(const at::Tensor & self) {

    static auto op = create_bitwise_not_typed_handle();
    return op.call(self);
}

// aten::bitwise_not(Tensor self) -> Tensor
at::Tensor bitwise_not::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_bitwise_not_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not_, name, "aten::bitwise_not_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not_, schema_str, "bitwise_not_(Tensor(a!) self) -> Tensor(a!)")

// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_not_::schema> create_bitwise_not__typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(bitwise_not_::name, bitwise_not_::overload_name)
        .typed<bitwise_not_::schema>();
}

// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & bitwise_not_::call(at::Tensor & self) {

    static auto op = create_bitwise_not__typed_handle();
    return op.call(self);
}

// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & bitwise_not_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_bitwise_not__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not_out, name, "aten::bitwise_not")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bitwise_not_out, schema_str, "bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_not_out::schema> create_bitwise_not_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(bitwise_not_out::name, bitwise_not_out::overload_name)
        .typed<bitwise_not_out::schema>();
}

// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_not_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_bitwise_not_out_typed_handle();
    return op.call(self, out);
}

// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_not_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_bitwise_not_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

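// NOTE [illustrative usage sketch, not generated code]: bitwise_not shows the
// usual trio of variants: functional, in-place (trailing underscore), and .out:
//
//   at::Tensor m = at::tensor({0, 1, 2});    // integral (or bool) dtype required
//   at::Tensor a = at::bitwise_not(m);       // functional: allocates a new tensor
//   at::bitwise_not_(m);                     // in-place: mutates m and returns it
//   at::Tensor out = at::empty_like(m);
//   at::bitwise_not_out(out, m);             // out: writes into a preallocated tensor
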
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not, name, "aten::logical_not")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not, schema_str, "logical_not(Tensor self) -> Tensor")

// aten::logical_not(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logical_not::schema> create_logical_not_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(logical_not::name, logical_not::overload_name)
        .typed<logical_not::schema>();
}

// aten::logical_not(Tensor self) -> Tensor
at::Tensor logical_not::call(const at::Tensor & self) {

    static auto op = create_logical_not_typed_handle();
    return op.call(self);
}

// aten::logical_not(Tensor self) -> Tensor
at::Tensor logical_not::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_logical_not_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not_, name, "aten::logical_not_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not_, schema_str, "logical_not_(Tensor(a!) self) -> Tensor(a!)")

// aten::logical_not_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_not_::schema> create_logical_not__typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(logical_not_::name, logical_not_::overload_name)
        .typed<logical_not_::schema>();
}

// aten::logical_not_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & logical_not_::call(at::Tensor & self) {

    static auto op = create_logical_not__typed_handle();
    return op.call(self);
}

// aten::logical_not_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & logical_not_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_logical_not__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not_out, name, "aten::logical_not")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logical_not_out, schema_str, "logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_not_out::schema> create_logical_not_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(logical_not_out::name, logical_not_out::overload_name)
        .typed<logical_not_out::schema>();
}

// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_not_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_logical_not_out_typed_handle();
    return op.call(self, out);
}

// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_not_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_logical_not_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate, name, "aten::concatenate")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate, schema_str, "concatenate(Tensor[] tensors, int dim=0) -> Tensor")

// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<concatenate::schema> create_concatenate_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(concatenate::name, concatenate::overload_name)
        .typed<concatenate::schema>();
}

// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor concatenate::call(at::TensorList tensors, int64_t dim) {

    static auto op = create_concatenate_typed_handle();
    return op.call(tensors, dim);
}

// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor concatenate::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim) {

    static auto op = create_concatenate_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_out, name, "aten::concatenate")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_out, schema_str, "concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<concatenate_out::schema> create_concatenate_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(concatenate_out::name, concatenate_out::overload_name)
        .typed<concatenate_out::schema>();
}

// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concatenate_out::call(at::TensorList tensors, int64_t dim, at::Tensor & out) {

    static auto op = create_concatenate_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concatenate_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {

    static auto op = create_concatenate_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_names, name, "aten::concatenate")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_names, overload_name, "names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_names, schema_str, "concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor")

// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<concatenate_names::schema> create_concatenate_names_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(concatenate_names::name, concatenate_names::overload_name)
        .typed<concatenate_names::schema>();
}

// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor concatenate_names::call(at::TensorList tensors, at::Dimname dim) {

    static auto op = create_concatenate_names_typed_handle();
    return op.call(tensors, dim);
}

// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor concatenate_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) {

    static auto op = create_concatenate_names_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_names_out, name, "aten::concatenate")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_names_out, overload_name, "names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(concatenate_names_out, schema_str, "concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<concatenate_names_out::schema> create_concatenate_names_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(concatenate_names_out::name, concatenate_names_out::overload_name)
        .typed<concatenate_names_out::schema>();
}

// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concatenate_names_out::call(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {

    static auto op = create_concatenate_names_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concatenate_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {

    static auto op = create_concatenate_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

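// NOTE [illustrative usage sketch, not generated code]: concatenate takes a
// TensorList (an ArrayRef, so a braced list works) plus either an integer dim
// or, for named tensors via the `.names` overload, an at::Dimname:
//
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({2, 3});
//   at::Tensor c = at::concatenate({a, b}, /*dim=*/0);   // result shape {4, 3}
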
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil, name, "aten::ceil")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil, schema_str, "ceil(Tensor self) -> Tensor")

// aten::ceil(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ceil::schema> create_ceil_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(ceil::name, ceil::overload_name)
        .typed<ceil::schema>();
}

// aten::ceil(Tensor self) -> Tensor
at::Tensor ceil::call(const at::Tensor & self) {

    static auto op = create_ceil_typed_handle();
    return op.call(self);
}

// aten::ceil(Tensor self) -> Tensor
at::Tensor ceil::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_ceil_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil_, name, "aten::ceil_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil_, schema_str, "ceil_(Tensor(a!) self) -> Tensor(a!)")

// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ceil_::schema> create_ceil__typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(ceil_::name, ceil_::overload_name)
        .typed<ceil_::schema>();
}

// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & ceil_::call(at::Tensor & self) {

    static auto op = create_ceil__typed_handle();
    return op.call(self);
}

// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & ceil_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_ceil__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil_out, name, "aten::ceil")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ceil_out, schema_str, "ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ceil_out::schema> create_ceil_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(ceil_out::name, ceil_out::overload_name)
        .typed<ceil_out::schema>();
}

// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ceil_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_ceil_out_typed_handle();
    return op.call(self, out);
}

// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ceil_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_ceil_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc, name, "aten::conv_tbc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc, schema_str, "conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor")

// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv_tbc::schema> create_conv_tbc_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(conv_tbc::name, conv_tbc::overload_name)
        .typed<conv_tbc::schema>();
}

// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
at::Tensor conv_tbc::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {

    static auto op = create_conv_tbc_typed_handle();
    return op.call(self, weight, bias, pad);
}

// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
at::Tensor conv_tbc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {

    static auto op = create_conv_tbc_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, pad);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh, name, "aten::cosh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh, schema_str, "cosh(Tensor self) -> Tensor")

// aten::cosh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cosh::schema> create_cosh_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cosh::name, cosh::overload_name)
        .typed<cosh::schema>();
}

// aten::cosh(Tensor self) -> Tensor
at::Tensor cosh::call(const at::Tensor & self) {

    static auto op = create_cosh_typed_handle();
    return op.call(self);
}

// aten::cosh(Tensor self) -> Tensor
at::Tensor cosh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_cosh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh_, name, "aten::cosh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh_, schema_str, "cosh_(Tensor(a!) self) -> Tensor(a!)")

// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cosh_::schema> create_cosh__typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cosh_::name, cosh_::overload_name)
        .typed<cosh_::schema>();
}

// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & cosh_::call(at::Tensor & self) {

    static auto op = create_cosh__typed_handle();
    return op.call(self);
}

// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & cosh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_cosh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh_out, name, "aten::cosh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosh_out, schema_str, "cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cosh_out::schema> create_cosh_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cosh_out::name, cosh_out::overload_name)
        .typed<cosh_out::schema>();
}

// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cosh_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_cosh_out_typed_handle();
    return op.call(self, out);
}

// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cosh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_cosh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosine_embedding_loss, name, "aten::cosine_embedding_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosine_embedding_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosine_embedding_loss, schema_str, "cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor")

// aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cosine_embedding_loss::schema> create_cosine_embedding_loss_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cosine_embedding_loss::name, cosine_embedding_loss::overload_name)
        .typed<cosine_embedding_loss::schema>();
}

// aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
at::Tensor cosine_embedding_loss::call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {

    static auto op = create_cosine_embedding_loss_typed_handle();
    return op.call(input1, input2, target, margin, reduction);
}

// aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
at::Tensor cosine_embedding_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {

    static auto op = create_cosine_embedding_loss_typed_handle();
    return op.redispatch(dispatchKeySet, input1, input2, target, margin, reduction);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_backward, name, "aten::cudnn_affine_grid_generator_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_backward, schema_str, "cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta")

// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_affine_grid_generator_backward::schema> create_cudnn_affine_grid_generator_backward_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cudnn_affine_grid_generator_backward::name, cudnn_affine_grid_generator_backward::overload_name)
        .typed<cudnn_affine_grid_generator_backward::schema>();
}

// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
at::Tensor cudnn_affine_grid_generator_backward::call(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {

    static auto op = create_cudnn_affine_grid_generator_backward_typed_handle();
    return op.call(grad, N, C, H, W);
}

// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
at::Tensor cudnn_affine_grid_generator_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {

    static auto op = create_cudnn_affine_grid_generator_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, N, C, H, W);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler, name, "aten::cudnn_grid_sampler")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler, schema_str, "cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output")

// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_grid_sampler::schema> create_cudnn_grid_sampler_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cudnn_grid_sampler::name, cudnn_grid_sampler::overload_name)
        .typed<cudnn_grid_sampler::schema>();
}

// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
at::Tensor cudnn_grid_sampler::call(const at::Tensor & self, const at::Tensor & grid) {

    static auto op = create_cudnn_grid_sampler_typed_handle();
    return op.call(self, grid);
}

// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
at::Tensor cudnn_grid_sampler::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid) {

    static auto op = create_cudnn_grid_sampler_typed_handle();
    return op.redispatch(dispatchKeySet, self, grid);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin, name, "aten::cummin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin, schema_str, "cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)")

// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummin::schema> create_cummin_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cummin::name, cummin::overload_name)
        .typed<cummin::schema>();
}

// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummin::call(const at::Tensor & self, int64_t dim) {

    static auto op = create_cummin_typed_handle();
    return op.call(self, dim);
}

// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {

    static auto op = create_cummin_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_out, name, "aten::cummin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_out, schema_str, "cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummin_out::schema> create_cummin_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cummin_out::name, cummin_out::overload_name)
        .typed<cummin_out::schema>();
}

// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummin_out::call(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_cummin_out_typed_handle();
    return op.call(self, dim, values, indices);
}

// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_cummin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, values, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_dimname, name, "aten::cummin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_dimname, schema_str, "cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)")

// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummin_dimname::schema> create_cummin_dimname_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cummin_dimname::name, cummin_dimname::overload_name)
        .typed<cummin_dimname::schema>();
}

// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummin_dimname::call(const at::Tensor & self, at::Dimname dim) {

    static auto op = create_cummin_dimname_typed_handle();
    return op.call(self, dim);
}

// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummin_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {

    static auto op = create_cummin_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_dimname_out, name, "aten::cummin")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cummin_dimname_out, schema_str, "cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummin_dimname_out::schema> create_cummin_dimname_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(cummin_dimname_out::name, cummin_dimname_out::overload_name)
        .typed<cummin_dimname_out::schema>();
}

// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummin_dimname_out::call(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_cummin_dimname_out_typed_handle();
    return op.call(self, dim, values, indices);
}

// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummin_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_cummin_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, values, indices);
}

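// NOTE [illustrative usage sketch, not generated code]: schemas returning
// "(Tensor values, Tensor indices)" map to ::std::tuple<at::Tensor,at::Tensor>
// in C++, so structured bindings unpack the result directly:
//
//   at::Tensor t = at::rand({5});
//   auto [values, indices] = at::cummin(t, /*dim=*/0);  // running min and its positions
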
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cummin_helper, name, "aten::_cummin_helper")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cummin_helper, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cummin_helper, schema_str, "_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()")

// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_cummin_helper::schema> create__cummin_helper_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(_cummin_helper::name, _cummin_helper::overload_name)
        .typed<_cummin_helper::schema>();
}

// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
void _cummin_helper::call(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {

    static auto op = create__cummin_helper_typed_handle();
    return op.call(self, values, indices, dim);
}

// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
void _cummin_helper::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {

    static auto op = create__cummin_helper_typed_handle();
    return op.redispatch(dispatchKeySet, self, values, indices, dim);
}

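// NOTE [illustrative, not generated code]: the "-> ()" schema above maps to a
// void C++ return. _cummin_helper is an internal (underscore-prefixed) entry
// point that hands its results back through the mutable values/indices
// arguments rather than a return value.
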
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Tensor, name, "aten::div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Tensor, schema_str, "div.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<div_Tensor::schema> create_div_Tensor_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(div_Tensor::name, div_Tensor::overload_name)
        .typed<div_Tensor::schema>();
}

// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor div_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_div_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor div_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_div_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Tensor, name, "aten::div_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Tensor, schema_str, "div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div__Tensor::schema> create_div__Tensor_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(div__Tensor::name, div__Tensor::overload_name)
        .typed<div__Tensor::schema>();
}

// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & div__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_div__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & div__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_div__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_out, name, "aten::div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_out, schema_str, "div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div_out::schema> create_div_out_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(div_out::name, div_out::overload_name)
        .typed<div_out::schema>();
}

// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_div_out_typed_handle();
    return op.call(self, other, out);
}

// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_div_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Tensor_mode, name, "aten::div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Tensor_mode, overload_name, "Tensor_mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Tensor_mode, schema_str, "div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor")

// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<div_Tensor_mode::schema> create_div_Tensor_mode_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(div_Tensor_mode::name, div_Tensor_mode::overload_name)
        .typed<div_Tensor_mode::schema>();
}

// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
at::Tensor div_Tensor_mode::call(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {

    static auto op = create_div_Tensor_mode_typed_handle();
    return op.call(self, other, rounding_mode);
}

// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
at::Tensor div_Tensor_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {

    static auto op = create_div_Tensor_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Tensor_mode, name, "aten::div_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Tensor_mode, overload_name, "Tensor_mode")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Tensor_mode, schema_str, "div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)")

// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div__Tensor_mode::schema> create_div__Tensor_mode_typed_handle() {
    return c10::Dispatcher::singleton()
        .findSchemaOrThrow(div__Tensor_mode::name, div__Tensor_mode::overload_name)
        .typed<div__Tensor_mode::schema>();
}

// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & div__Tensor_mode::call(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {

    static auto op = create_div__Tensor_mode_typed_handle();
    return op.call(self, other, rounding_mode);
}

// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & div__Tensor_mode::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {

    static auto op = create_div__Tensor_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode);
}

2349STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_out_mode, name, "aten::div")
2350STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_out_mode, overload_name, "out_mode")
2351STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_out_mode, schema_str, "div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)")
2352
2353// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
2354static C10_NOINLINE c10::TypedOperatorHandle<div_out_mode::schema> create_div_out_mode_typed_handle() {
2355 return c10::Dispatcher::singleton()
2356 .findSchemaOrThrow(div_out_mode::name, div_out_mode::overload_name)
2357 .typed<div_out_mode::schema>();
2358}
2359
2360// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
2361at::Tensor & div_out_mode::call(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
2362
2363 static auto op = create_div_out_mode_typed_handle();
2364 return op.call(self, other, rounding_mode, out);
2365}
2366
2367// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
2368at::Tensor & div_out_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
2369
2370 static auto op = create_div_out_mode_typed_handle();
2371 return op.redispatch(dispatchKeySet, self, other, rounding_mode, out);
2372}
2373
2374STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar, name, "aten::div")
2375STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar, overload_name, "Scalar")
2376STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar, schema_str, "div.Scalar(Tensor self, Scalar other) -> Tensor")
2377
2378// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
2379static C10_NOINLINE c10::TypedOperatorHandle<div_Scalar::schema> create_div_Scalar_typed_handle() {
2380 return c10::Dispatcher::singleton()
2381 .findSchemaOrThrow(div_Scalar::name, div_Scalar::overload_name)
2382 .typed<div_Scalar::schema>();
2383}
2384
2385// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
2386at::Tensor div_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
2387
2388 static auto op = create_div_Scalar_typed_handle();
2389 return op.call(self, other);
2390}
2391
2392// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
2393at::Tensor div_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
2394
2395 static auto op = create_div_Scalar_typed_handle();
2396 return op.redispatch(dispatchKeySet, self, other);
2397}
2398
2399STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Scalar, name, "aten::div_")
2400STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Scalar, overload_name, "Scalar")
2401STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Scalar, schema_str, "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
2402
2403// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
2404static C10_NOINLINE c10::TypedOperatorHandle<div__Scalar::schema> create_div__Scalar_typed_handle() {
2405 return c10::Dispatcher::singleton()
2406 .findSchemaOrThrow(div__Scalar::name, div__Scalar::overload_name)
2407 .typed<div__Scalar::schema>();
2408}
2409
2410// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
2411at::Tensor & div__Scalar::call(at::Tensor & self, const at::Scalar & other) {
2412
2413 static auto op = create_div__Scalar_typed_handle();
2414 return op.call(self, other);
2415}
2416
2417// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
2418at::Tensor & div__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
2419
2420 static auto op = create_div__Scalar_typed_handle();
2421 return op.redispatch(dispatchKeySet, self, other);
2422}
2423
2424STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_mode, name, "aten::div")
2425STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_mode, overload_name, "Scalar_mode")
2426STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_mode, schema_str, "div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor")
2427
2428// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
2429static C10_NOINLINE c10::TypedOperatorHandle<div_Scalar_mode::schema> create_div_Scalar_mode_typed_handle() {
2430 return c10::Dispatcher::singleton()
2431 .findSchemaOrThrow(div_Scalar_mode::name, div_Scalar_mode::overload_name)
2432 .typed<div_Scalar_mode::schema>();
2433}
2434
2435// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
2436at::Tensor div_Scalar_mode::call(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
2437
2438 static auto op = create_div_Scalar_mode_typed_handle();
2439 return op.call(self, other, rounding_mode);
2440}
2441
2442// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
2443at::Tensor div_Scalar_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
2444
2445 static auto op = create_div_Scalar_mode_typed_handle();
2446 return op.redispatch(dispatchKeySet, self, other, rounding_mode);
2447}
2448
2449STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Scalar_mode, name, "aten::div_")
2450STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Scalar_mode, overload_name, "Scalar_mode")
2451STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div__Scalar_mode, schema_str, "div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)")
2452
2453// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
2454static C10_NOINLINE c10::TypedOperatorHandle<div__Scalar_mode::schema> create_div__Scalar_mode_typed_handle() {
2455 return c10::Dispatcher::singleton()
2456 .findSchemaOrThrow(div__Scalar_mode::name, div__Scalar_mode::overload_name)
2457 .typed<div__Scalar_mode::schema>();
2458}
2459
2460// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
2461at::Tensor & div__Scalar_mode::call(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
2462
2463 static auto op = create_div__Scalar_mode_typed_handle();
2464 return op.call(self, other, rounding_mode);
2465}
2466
2467// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
2468at::Tensor & div__Scalar_mode::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
2469
2470 static auto op = create_div__Scalar_mode_typed_handle();
2471 return op.redispatch(dispatchKeySet, self, other, rounding_mode);
2472}
2473
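// Illustrative sketch (assumed usage, not part of the generated operator table):
// every div overload above funnels into the same dispatcher entry, so calling the
// operator structs directly is equivalent to the at::div wrappers. The tensors
// below are hypothetical.
//
//     at::Tensor a = at::randn({4});
//     at::Tensor b = at::randn({4});
//     at::Tensor q  = div_Tensor::call(a, b);                  // true division, a / b
//     at::Tensor qt = div_Tensor_mode::call(a, b, "trunc");    // rounds toward zero
//     at::Tensor qf = div_Tensor_mode::call(a, b, "floor");    // rounds toward -inf
//     // rounding_mode = c10::nullopt reproduces plain true division.
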
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_forward_only, name, "aten::_embedding_bag_forward_only")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_forward_only, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_forward_only, schema_str, "_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)")

// aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_forward_only::schema> create__embedding_bag_forward_only_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_forward_only::name, _embedding_bag_forward_only::overload_name)
      .typed<_embedding_bag_forward_only::schema>();
}

// aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {

    static auto op = create__embedding_bag_forward_only_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {

    static auto op = create__embedding_bag_forward_only_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_bag, name, "aten::embedding_bag")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_bag, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_bag, schema_str, "embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)")

// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<embedding_bag::schema> create_embedding_bag_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_bag::name, embedding_bag::overload_name)
      .typed<embedding_bag::schema>();
}

// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {

    static auto op = create_embedding_bag_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
}

// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {

    static auto op = create_embedding_bag_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_bag_padding_idx, name, "aten::embedding_bag")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_bag_padding_idx, overload_name, "padding_idx")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(embedding_bag_padding_idx, schema_str, "embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)")

// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<embedding_bag_padding_idx::schema> create_embedding_bag_padding_idx_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_bag_padding_idx::name, embedding_bag_padding_idx::overload_name)
      .typed<embedding_bag_padding_idx::schema>();
}

// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {

    static auto op = create_embedding_bag_padding_idx_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {

    static auto op = create_embedding_bag_padding_idx_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

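// Illustrative sketch (assumed usage, not generated): embedding_bag returns a
// 4-tuple (output, offset2bag, bag_size, max_indices); most callers keep only the
// first element. mode selects the reduction (0 = sum, 1 = mean, 2 = max). The
// tensors below are hypothetical.
//
//     at::Tensor weight  = at::randn({10, 3});
//     at::Tensor indices = at::tensor({1, 2, 4, 5, 4, 3}, at::kLong);
//     at::Tensor offsets = at::tensor({0, 3}, at::kLong);      // two bags of three
//     auto result = embedding_bag::call(weight, indices, offsets,
//                                       /*scale_grad_by_freq=*/false, /*mode=*/0,
//                                       /*sparse=*/false, /*per_sample_weights=*/{},
//                                       /*include_last_offset=*/false);
//     at::Tensor pooled = std::get<0>(result);                 // shape {2, 3}
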
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_zeros, name, "aten::new_zeros")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_zeros, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_zeros, schema_str, "new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<new_zeros::schema> create_new_zeros_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_zeros::name, new_zeros::overload_name)
      .typed<new_zeros::schema>();
}

// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_zeros::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_new_zeros_typed_handle();
    return op.call(self, size, dtype, layout, device, pin_memory);
}

// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_zeros::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_new_zeros_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory);
}

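// Illustrative sketch (assumed usage, not generated): at this layer the schema's
// TensorOptions are flattened into four separate optionals; unset fields are
// inherited from self. The names below are hypothetical.
//
//     at::Tensor self = at::randn({2, 2});
//     std::vector<c10::SymInt> size = {3, 3};
//     at::Tensor z = new_zeros::call(self, size, /*dtype=*/at::kFloat,
//                                    /*layout=*/c10::nullopt, /*device=*/c10::nullopt,
//                                    /*pin_memory=*/c10::nullopt);
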
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf, name, "aten::erf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf, schema_str, "erf(Tensor self) -> Tensor")

// aten::erf(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<erf::schema> create_erf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erf::name, erf::overload_name)
      .typed<erf::schema>();
}

// aten::erf(Tensor self) -> Tensor
at::Tensor erf::call(const at::Tensor & self) {

    static auto op = create_erf_typed_handle();
    return op.call(self);
}

// aten::erf(Tensor self) -> Tensor
at::Tensor erf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_erf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf_, name, "aten::erf_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf_, schema_str, "erf_(Tensor(a!) self) -> Tensor(a!)")

// aten::erf_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erf_::schema> create_erf__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erf_::name, erf_::overload_name)
      .typed<erf_::schema>();
}

// aten::erf_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erf_::call(at::Tensor & self) {

    static auto op = create_erf__typed_handle();
    return op.call(self);
}

// aten::erf_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erf_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_erf__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf_out, name, "aten::erf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(erf_out, schema_str, "erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erf_out::schema> create_erf_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erf_out::name, erf_out::overload_name)
      .typed<erf_out::schema>();
}

// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erf_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_erf_out_typed_handle();
    return op.call(self, out);
}

// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_erf_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

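// Illustrative sketch (assumed usage, not generated): erf exposes the standard
// triple of functional, in-place, and out= variants. The names below are
// hypothetical.
//
//     at::Tensor x = at::randn({3});
//     at::Tensor y = erf::call(x);            // allocates a fresh result
//     erf_::call(x);                          // mutates x in place
//     at::Tensor out = at::empty_like(x);
//     erf_out::call(x, out);                  // writes into out and returns it
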
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler, name, "aten::grid_sampler")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler, schema_str, "grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor")

// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler::schema> create_grid_sampler_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler::name, grid_sampler::overload_name)
      .typed<grid_sampler::schema>();
}

// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create_grid_sampler_typed_handle();
    return op.call(input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create_grid_sampler_typed_handle();
    return op.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback, name, "aten::_grid_sampler_2d_cpu_fallback")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback, schema_str, "_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor")

// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_grid_sampler_2d_cpu_fallback::schema> create__grid_sampler_2d_cpu_fallback_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_grid_sampler_2d_cpu_fallback::name, _grid_sampler_2d_cpu_fallback::overload_name)
      .typed<_grid_sampler_2d_cpu_fallback::schema>();
}

// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor _grid_sampler_2d_cpu_fallback::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create__grid_sampler_2d_cpu_fallback_typed_handle();
    return op.call(input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor _grid_sampler_2d_cpu_fallback::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create__grid_sampler_2d_cpu_fallback_typed_handle();
    return op.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d, name, "aten::grid_sampler_3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d, schema_str, "grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor")

// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_3d::schema> create_grid_sampler_3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_3d::name, grid_sampler_3d::overload_name)
      .typed<grid_sampler_3d::schema>();
}

// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler_3d::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create_grid_sampler_3d_typed_handle();
    return op.call(input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler_3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {

    static auto op = create_grid_sampler_3d_typed_handle();
    return op.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
}

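// Illustrative note (assumed encodings, not generated): interpolation_mode and
// padding_mode carry ATen's integer grid-sampler encodings, which we read as
// 0 = bilinear / 1 = nearest / 2 = bicubic and 0 = zeros / 1 = border /
// 2 = reflection. The tensors below are hypothetical.
//
//     at::Tensor input = at::randn({1, 1, 4, 4});
//     at::Tensor grid  = at::rand({1, 2, 2, 2}) * 2 - 1;   // coords normalized to [-1, 1]
//     at::Tensor sampled = grid_sampler::call(input, grid,
//                                             /*interpolation_mode=*/0,
//                                             /*padding_mode=*/0,
//                                             /*align_corners=*/false);
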
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window, name, "aten::hann_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window, schema_str, "hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hann_window::schema> create_hann_window_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hann_window::name, hann_window::overload_name)
      .typed<hann_window::schema>();
}

// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hann_window::call(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hann_window_typed_handle();
    return op.call(window_length, dtype, layout, device, pin_memory);
}

// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hann_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hann_window_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_periodic, name, "aten::hann_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_periodic, overload_name, "periodic")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_periodic, schema_str, "hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hann_window_periodic::schema> create_hann_window_periodic_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hann_window_periodic::name, hann_window_periodic::overload_name)
      .typed<hann_window_periodic::schema>();
}

// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hann_window_periodic::call(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hann_window_periodic_typed_handle();
    return op.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hann_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hann_window_periodic_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window, name, "aten::hamming_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window, schema_str, "hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window::schema> create_hamming_window_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window::name, hamming_window::overload_name)
      .typed<hamming_window::schema>();
}

// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window::call(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hamming_window_typed_handle();
    return op.call(window_length, dtype, layout, device, pin_memory);
}

// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hamming_window_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic, name, "aten::hamming_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic, overload_name, "periodic")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic, schema_str, "hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic::schema> create_hamming_window_periodic_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic::name, hamming_window_periodic::overload_name)
      .typed<hamming_window_periodic::schema>();
}

// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic::call(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hamming_window_periodic_typed_handle();
    return op.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hamming_window_periodic_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha, name, "aten::hamming_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha, overload_name, "periodic_alpha")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha, schema_str, "hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_alpha::schema> create_hamming_window_periodic_alpha_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic_alpha::name, hamming_window_periodic_alpha::overload_name)
      .typed<hamming_window_periodic_alpha::schema>();
}

// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic_alpha::call(int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hamming_window_periodic_alpha_typed_handle();
    return op.call(window_length, periodic, alpha, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic_alpha::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hamming_window_periodic_alpha_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, alpha, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_beta, name, "aten::hamming_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_beta, overload_name, "periodic_alpha_beta")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_beta, schema_str, "hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_alpha_beta::schema> create_hamming_window_periodic_alpha_beta_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic_alpha_beta::name, hamming_window_periodic_alpha_beta::overload_name)
      .typed<hamming_window_periodic_alpha_beta::schema>();
}

// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic_alpha_beta::call(int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hamming_window_periodic_alpha_beta_typed_handle();
    return op.call(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic_alpha_beta::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_hamming_window_periodic_alpha_beta_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
}

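// Illustrative sketch (assumed usage, not generated): the window factories take no
// self tensor, only a length plus the flattened TensorOptions fields; periodic=true
// drops the final sample so windows tile cleanly for STFT overlap-add. 0.54/0.46
// are the classic Hamming coefficients. The names below are hypothetical.
//
//     at::Tensor w_hann = hann_window_periodic::call(
//         /*window_length=*/256, /*periodic=*/true,
//         at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);
//     at::Tensor w_hamm = hamming_window_periodic_alpha_beta::call(
//         256, true, /*alpha=*/0.54, /*beta=*/0.46,
//         c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
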
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_backward, name, "aten::native_group_norm_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_backward, schema_str, "native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)")

// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_group_norm_backward::schema> create_native_group_norm_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_group_norm_backward::name, native_group_norm_backward::overload_name)
      .typed<native_group_norm_backward::schema>();
}

// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {

    static auto op = create_native_group_norm_backward_typed_handle();
    return op.call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
}

// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {

    static auto op = create_native_group_norm_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2c, name, "aten::_fft_c2c")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2c, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2c, schema_str, "_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor")

// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_fft_c2c::schema> create__fft_c2c_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fft_c2c::name, _fft_c2c::overload_name)
      .typed<_fft_c2c::schema>();
}

// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
at::Tensor _fft_c2c::call(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {

    static auto op = create__fft_c2c_typed_handle();
    return op.call(self, dim, normalization, forward);
}

// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
at::Tensor _fft_c2c::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {

    static auto op = create__fft_c2c_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, normalization, forward);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2c_out, name, "aten::_fft_c2c")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2c_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fft_c2c_out, schema_str, "_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_fft_c2c_out::schema> create__fft_c2c_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fft_c2c_out::name, _fft_c2c_out::overload_name)
      .typed<_fft_c2c_out::schema>();
}

// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fft_c2c_out::call(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {

    static auto op = create__fft_c2c_out_typed_handle();
    return op.call(self, dim, normalization, forward, out);
}

// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fft_c2c_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {

    static auto op = create__fft_c2c_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, normalization, forward, out);
}

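// Illustrative note (assumed encodings, not generated): normalization carries
// ATen's internal fft_norm_mode value, which we read as 0 = no scaling,
// 1 = divide by sqrt(n), 2 = divide by n; forward selects FFT vs. inverse FFT.
// The tensors below are hypothetical.
//
//     at::Tensor x = at::randn({8}, at::kComplexFloat);
//     std::vector<c10::SymInt> dims = {0};
//     at::Tensor X = _fft_c2c::call(x, dims, /*normalization=*/0, /*forward=*/true);
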
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_compressed_sparse_indices, name, "aten::_validate_compressed_sparse_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_compressed_sparse_indices, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_validate_compressed_sparse_indices, schema_str, "_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()")

// aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_validate_compressed_sparse_indices::schema> create__validate_compressed_sparse_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_compressed_sparse_indices::name, _validate_compressed_sparse_indices::overload_name)
      .typed<_validate_compressed_sparse_indices::schema>();
}

// aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
void _validate_compressed_sparse_indices::call(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {

    static auto op = create__validate_compressed_sparse_indices_typed_handle();
    return op.call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
}

// aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
void _validate_compressed_sparse_indices::redispatch(c10::DispatchKeySet dispatchKeySet, bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {

    static auto op = create__validate_compressed_sparse_indices_typed_handle();
    return op.redispatch(dispatchKeySet, is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_get_plan_cache_size, name, "aten::_cufft_get_plan_cache_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_get_plan_cache_size, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_get_plan_cache_size, schema_str, "_cufft_get_plan_cache_size(int device_index) -> int")

// aten::_cufft_get_plan_cache_size(int device_index) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_cufft_get_plan_cache_size::schema> create__cufft_get_plan_cache_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cufft_get_plan_cache_size::name, _cufft_get_plan_cache_size::overload_name)
      .typed<_cufft_get_plan_cache_size::schema>();
}

// aten::_cufft_get_plan_cache_size(int device_index) -> int
int64_t _cufft_get_plan_cache_size::call(int64_t device_index) {

    static auto op = create__cufft_get_plan_cache_size_typed_handle();
    return op.call(device_index);
}

// aten::_cufft_get_plan_cache_size(int device_index) -> int
int64_t _cufft_get_plan_cache_size::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t device_index) {

    static auto op = create__cufft_get_plan_cache_size_typed_handle();
    return op.redispatch(dispatchKeySet, device_index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_get_plan_cache_max_size, name, "aten::_cufft_get_plan_cache_max_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_get_plan_cache_max_size, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cufft_get_plan_cache_max_size, schema_str, "_cufft_get_plan_cache_max_size(int device_index) -> int")

// aten::_cufft_get_plan_cache_max_size(int device_index) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_cufft_get_plan_cache_max_size::schema> create__cufft_get_plan_cache_max_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cufft_get_plan_cache_max_size::name, _cufft_get_plan_cache_max_size::overload_name)
      .typed<_cufft_get_plan_cache_max_size::schema>();
}

// aten::_cufft_get_plan_cache_max_size(int device_index) -> int
int64_t _cufft_get_plan_cache_max_size::call(int64_t device_index) {

    static auto op = create__cufft_get_plan_cache_max_size_typed_handle();
    return op.call(device_index);
}

// aten::_cufft_get_plan_cache_max_size(int device_index) -> int
int64_t _cufft_get_plan_cache_max_size::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t device_index) {

    static auto op = create__cufft_get_plan_cache_max_size_typed_handle();
    return op.redispatch(dispatchKeySet, device_index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_Tensor, name, "aten::index")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_Tensor, schema_str, "index.Tensor(Tensor self, Tensor?[] indices) -> Tensor")

// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_Tensor::schema> create_index_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_Tensor::name, index_Tensor::overload_name)
      .typed<index_Tensor::schema>();
}

// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
at::Tensor index_Tensor::call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {

    static auto op = create_index_Tensor_typed_handle();
    return op.call(self, indices);
}

// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
at::Tensor index_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {

    static auto op = create_index_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_Tensor_out, name, "aten::index")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_Tensor_out, schema_str, "index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)")

// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_Tensor_out::schema> create_index_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_Tensor_out::name, index_Tensor_out::overload_name)
      .typed<index_Tensor_out::schema>();
}

// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_Tensor_out::call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out) {

    static auto op = create_index_Tensor_out_typed_handle();
    return op.call(self, indices, out);
}

// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out) {

    static auto op = create_index_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, out);
}

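// Illustrative sketch (assumed usage, not generated): index takes a list of
// optional tensors so skipped dimensions can be expressed as nullopt, mirroring
// Python's x[idx0, :, ...] advanced indexing. The names below are hypothetical.
//
//     at::Tensor x = at::randn({3, 4, 5});
//     c10::List<c10::optional<at::Tensor>> indices;
//     indices.push_back(at::tensor({0, 2}, at::kLong));   // pick rows 0 and 2
//     indices.push_back(c10::nullopt);                    // keep dim 1 intact
//     at::Tensor picked = index_Tensor::call(x, indices); // shape {2, 4, 5}
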
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isnan, name, "aten::isnan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isnan, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isnan, schema_str, "isnan(Tensor self) -> Tensor")

// aten::isnan(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isnan::schema> create_isnan_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isnan::name, isnan::overload_name)
      .typed<isnan::schema>();
}

// aten::isnan(Tensor self) -> Tensor
at::Tensor isnan::call(const at::Tensor & self) {

    static auto op = create_isnan_typed_handle();
    return op.call(self);
}

// aten::isnan(Tensor self) -> Tensor
at::Tensor isnan::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_isnan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue, name, "aten::kthvalue")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue, schema_str, "kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<kthvalue::schema> create_kthvalue_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kthvalue::name, kthvalue::overload_name)
      .typed<kthvalue::schema>();
}

// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> kthvalue::call(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {

    static auto op = create_kthvalue_typed_handle();
    return op.call(self, k, dim, keepdim);
}

// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> kthvalue::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {

    static auto op = create_kthvalue_typed_handle();
    return op.redispatch(dispatchKeySet, self, k, dim, keepdim);
}

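// Illustrative sketch (assumed usage, not generated): kthvalue returns
// (values, indices) for the k-th smallest entry along dim, with k being 1-based.
// The names below are hypothetical.
//
//     at::Tensor x = at::tensor({3.0, 1.0, 2.0});
//     auto [vals, idxs] = kthvalue::call(x, /*k=*/2, /*dim=*/-1, /*keepdim=*/false);
//     // vals == 2.0 (second smallest), idxs == 2
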
3124STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_values, name, "aten::kthvalue")
3125STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_values, overload_name, "values")
3126STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_values, schema_str, "kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")
3127
3128// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
3129static C10_NOINLINE c10::TypedOperatorHandle<kthvalue_values::schema> create_kthvalue_values_typed_handle() {
3130 return c10::Dispatcher::singleton()
3131 .findSchemaOrThrow(kthvalue_values::name, kthvalue_values::overload_name)
3132 .typed<kthvalue_values::schema>();
3133}
3134
3135// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
3136::std::tuple<at::Tensor &,at::Tensor &> kthvalue_values::call(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
3137
3138 static auto op = create_kthvalue_values_typed_handle();
3139 return op.call(self, k, dim, keepdim, values, indices);
3140}
3141
3142// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
3143::std::tuple<at::Tensor &,at::Tensor &> kthvalue_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
3144
3145 static auto op = create_kthvalue_values_typed_handle();
3146 return op.redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices);
3147}
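// In the `.values` out-variant above, the schema's Tensor(a!)/Tensor(b!)
// annotations mark `values` and `indices` as mutable outputs: the kernel
// writes into the caller-provided buffers, and the returned tuple merely
// aliases them (hence the at::Tensor & return types).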

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_dimname, name, "aten::kthvalue")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_dimname, schema_str, "kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)")

// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<kthvalue_dimname::schema> create_kthvalue_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kthvalue_dimname::name, kthvalue_dimname::overload_name)
      .typed<kthvalue_dimname::schema>();
}

// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname::call(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
    
    static auto op = create_kthvalue_dimname_typed_handle();
    return op.call(self, k, dim, keepdim);
}

// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
    
    static auto op = create_kthvalue_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, k, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_dimname_out, name, "aten::kthvalue")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(kthvalue_dimname_out, schema_str, "kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<kthvalue_dimname_out::schema> create_kthvalue_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kthvalue_dimname_out::name, kthvalue_dimname_out::overload_name)
      .typed<kthvalue_dimname_out::schema>();
}

// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_dimname_out::call(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_kthvalue_dimname_out_typed_handle();
    return op.call(self, k, dim, keepdim, values, indices);
}

// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_kthvalue_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_layer_norm, name, "aten::native_layer_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_layer_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_layer_norm, schema_str, "native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)")

// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_layer_norm::schema> create_native_layer_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_layer_norm::name, native_layer_norm::overload_name)
      .typed<native_layer_norm::schema>();
}

// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm::call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
    
    static auto op = create_native_layer_norm_typed_handle();
    return op.call(input, normalized_shape, weight, bias, eps);
}

// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
    
    static auto op = create_native_layer_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps);
}
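// Note the SymInt[] normalized_shape argument: SymInt schema types lower to
// c10::SymIntArrayRef in C++, which can carry either concrete integers or
// symbolic shape expressions (e.g. under symbolic tracing), unlike the plain
// at::IntArrayRef used by the non-SymInt operators in this file.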

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num, name, "aten::nan_to_num")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num, schema_str, "nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor")

// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nan_to_num::schema> create_nan_to_num_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nan_to_num::name, nan_to_num::overload_name)
      .typed<nan_to_num::schema>();
}

// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
at::Tensor nan_to_num::call(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
    
    static auto op = create_nan_to_num_typed_handle();
    return op.call(self, nan, posinf, neginf);
}

// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
at::Tensor nan_to_num::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
    
    static auto op = create_nan_to_num_typed_handle();
    return op.redispatch(dispatchKeySet, self, nan, posinf, neginf);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num_, name, "aten::nan_to_num_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num_, schema_str, "nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)")

// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nan_to_num_::schema> create_nan_to_num__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nan_to_num_::name, nan_to_num_::overload_name)
      .typed<nan_to_num_::schema>();
}

// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
at::Tensor & nan_to_num_::call(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
    
    static auto op = create_nan_to_num__typed_handle();
    return op.call(self, nan, posinf, neginf);
}

// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
at::Tensor & nan_to_num_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
    
    static auto op = create_nan_to_num__typed_handle();
    return op.redispatch(dispatchKeySet, self, nan, posinf, neginf);
}
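// The trailing underscore marks the in-place variant: it is registered under
// its own schema name ("aten::nan_to_num_"), takes `self` as a mutable
// at::Tensor & (schema `Tensor(a!) self`), and returns that same reference.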

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num_out, name, "aten::nan_to_num")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nan_to_num_out, schema_str, "nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nan_to_num_out::schema> create_nan_to_num_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nan_to_num_out::name, nan_to_num_out::overload_name)
      .typed<nan_to_num_out::schema>();
}

// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nan_to_num_out::call(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
    
    static auto op = create_nan_to_num_out_typed_handle();
    return op.call(self, nan, posinf, neginf, out);
}

// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nan_to_num_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
    
    static auto op = create_nan_to_num_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, nan, posinf, neginf, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_int8_weight_fp32_activation, name, "aten::fbgemm_linear_int8_weight_fp32_activation")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_int8_weight_fp32_activation, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_int8_weight_fp32_activation, schema_str, "fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor")

// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_int8_weight_fp32_activation::schema> create_fbgemm_linear_int8_weight_fp32_activation_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fbgemm_linear_int8_weight_fp32_activation::name, fbgemm_linear_int8_weight_fp32_activation::overload_name)
      .typed<fbgemm_linear_int8_weight_fp32_activation::schema>();
}

// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_int8_weight_fp32_activation::call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    
    static auto op = create_fbgemm_linear_int8_weight_fp32_activation_typed_handle();
    return op.call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}

// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_int8_weight_fp32_activation::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    
    static auto op = create_fbgemm_linear_int8_weight_fp32_activation_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_int8_weight, name, "aten::fbgemm_linear_int8_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_int8_weight, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_int8_weight, schema_str, "fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor")

// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_int8_weight::schema> create_fbgemm_linear_int8_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fbgemm_linear_int8_weight::name, fbgemm_linear_int8_weight::overload_name)
      .typed<fbgemm_linear_int8_weight::schema>();
}

// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_int8_weight::call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    
    static auto op = create_fbgemm_linear_int8_weight_typed_handle();
    return op.call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}

// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_int8_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    
    static auto op = create_fbgemm_linear_int8_weight_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_fp16_weight_fp32_activation, name, "aten::fbgemm_linear_fp16_weight_fp32_activation")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_fp16_weight_fp32_activation, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fbgemm_linear_fp16_weight_fp32_activation, schema_str, "fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor")

// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_fp16_weight_fp32_activation::schema> create_fbgemm_linear_fp16_weight_fp32_activation_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fbgemm_linear_fp16_weight_fp32_activation::name, fbgemm_linear_fp16_weight_fp32_activation::overload_name)
      .typed<fbgemm_linear_fp16_weight_fp32_activation::schema>();
}

// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_fp16_weight_fp32_activation::call(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    
    static auto op = create_fbgemm_linear_fp16_weight_fp32_activation_typed_handle();
    return op.call(input, packed_weight, bias);
}

// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_fp16_weight_fp32_activation::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    
    static auto op = create_fbgemm_linear_fp16_weight_fp32_activation_typed_handle();
    return op.redispatch(dispatchKeySet, input, packed_weight, bias);
}
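// The fbgemm linear operators above consume prepacked weights (`packed` /
// `packed_weight`), presumably produced by the corresponding
// aten::fbgemm_pack_* operators before reaching these entry points.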

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Tensor, name, "aten::xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Tensor, schema_str, "xlogy.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_Tensor::schema> create_xlogy_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_Tensor::name, xlogy_Tensor::overload_name)
      .typed<xlogy_Tensor::schema>();
}

// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor xlogy_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_xlogy_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor xlogy_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_xlogy_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Scalar_Self, name, "aten::xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Scalar_Self, overload_name, "Scalar_Self")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Scalar_Self, schema_str, "xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor")

// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_Scalar_Self::schema> create_xlogy_Scalar_Self_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_Scalar_Self::name, xlogy_Scalar_Self::overload_name)
      .typed<xlogy_Scalar_Self::schema>();
}

// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
at::Tensor xlogy_Scalar_Self::call(const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_xlogy_Scalar_Self_typed_handle();
    return op.call(self, other);
}

// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
at::Tensor xlogy_Scalar_Self::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_xlogy_Scalar_Self_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Scalar_Other, name, "aten::xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Scalar_Other, overload_name, "Scalar_Other")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_Scalar_Other, schema_str, "xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor")

// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_Scalar_Other::schema> create_xlogy_Scalar_Other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_Scalar_Other::name, xlogy_Scalar_Other::overload_name)
      .typed<xlogy_Scalar_Other::schema>();
}

// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
at::Tensor xlogy_Scalar_Other::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_xlogy_Scalar_Other_typed_handle();
    return op.call(self, other);
}

// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
at::Tensor xlogy_Scalar_Other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_xlogy_Scalar_Other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}
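// The xlogy overloads are distinguished purely by overload_name ("Tensor",
// "Scalar_Self", "Scalar_Other", plus the in-place and Out* forms below).
// A specific overload can be resolved by hand the same way the
// create_*_typed_handle helpers do; an illustrative sketch:
//
//   auto op = c10::Dispatcher::singleton()
//       .findSchemaOrThrow("aten::xlogy", "Scalar_Other")
//       .typed<at::Tensor (const at::Tensor &, const at::Scalar &)>();
//   at::Tensor r = op.call(at::rand({3}), 2.0);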

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy__Tensor, name, "aten::xlogy_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy__Tensor, schema_str, "xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy__Tensor::schema> create_xlogy__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy__Tensor::name, xlogy__Tensor::overload_name)
      .typed<xlogy__Tensor::schema>();
}

// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & xlogy__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_xlogy__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & xlogy__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_xlogy__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy__Scalar_Other, name, "aten::xlogy_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy__Scalar_Other, overload_name, "Scalar_Other")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy__Scalar_Other, schema_str, "xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy__Scalar_Other::schema> create_xlogy__Scalar_Other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy__Scalar_Other::name, xlogy__Scalar_Other::overload_name)
      .typed<xlogy__Scalar_Other::schema>();
}

// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & xlogy__Scalar_Other::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_xlogy__Scalar_Other_typed_handle();
    return op.call(self, other);
}

// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & xlogy__Scalar_Other::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_xlogy__Scalar_Other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutTensor, name, "aten::xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutTensor, overload_name, "OutTensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutTensor, schema_str, "xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_OutTensor::schema> create_xlogy_OutTensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_OutTensor::name, xlogy_OutTensor::overload_name)
      .typed<xlogy_OutTensor::schema>();
}

// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutTensor::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutTensor_typed_handle();
    return op.call(self, other, out);
}

// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutTensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutTensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutScalar_Self, name, "aten::xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutScalar_Self, overload_name, "OutScalar_Self")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutScalar_Self, schema_str, "xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_OutScalar_Self::schema> create_xlogy_OutScalar_Self_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_OutScalar_Self::name, xlogy_OutScalar_Self::overload_name)
      .typed<xlogy_OutScalar_Self::schema>();
}

// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutScalar_Self::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutScalar_Self_typed_handle();
    return op.call(self, other, out);
}

// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutScalar_Self::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutScalar_Self_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutScalar_Other, name, "aten::xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutScalar_Other, overload_name, "OutScalar_Other")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(xlogy_OutScalar_Other, schema_str, "xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_OutScalar_Other::schema> create_xlogy_OutScalar_Other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_OutScalar_Other::name, xlogy_OutScalar_Other::overload_name)
      .typed<xlogy_OutScalar_Other::schema>();
}

// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutScalar_Other::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutScalar_Other_typed_handle();
    return op.call(self, other, out);
}

// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutScalar_Other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutScalar_Other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_backward_data, name, "aten::_log_softmax_backward_data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_backward_data, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_backward_data, schema_str, "_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor")

// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_log_softmax_backward_data::schema> create__log_softmax_backward_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_log_softmax_backward_data::name, _log_softmax_backward_data::overload_name)
      .typed<_log_softmax_backward_data::schema>();
}

// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
at::Tensor _log_softmax_backward_data::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
    
    static auto op = create__log_softmax_backward_data_typed_handle();
    return op.call(grad_output, output, dim, input_dtype);
}

// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
at::Tensor _log_softmax_backward_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
    
    static auto op = create__log_softmax_backward_data_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, input_dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_backward_data_out, name, "aten::_log_softmax_backward_data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_backward_data_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_log_softmax_backward_data_out, schema_str, "_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_log_softmax_backward_data_out::schema> create__log_softmax_backward_data_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_log_softmax_backward_data_out::name, _log_softmax_backward_data_out::overload_name)
      .typed<_log_softmax_backward_data_out::schema>();
}

// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _log_softmax_backward_data_out::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
    
    static auto op = create__log_softmax_backward_data_out_typed_handle();
    return op.call(grad_output, output, dim, input_dtype, out);
}

// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _log_softmax_backward_data_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
    
    static auto op = create__log_softmax_backward_data_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp, name, "aten::logcumsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp, schema_str, "logcumsumexp(Tensor self, int dim) -> Tensor")

// aten::logcumsumexp(Tensor self, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logcumsumexp::schema> create_logcumsumexp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logcumsumexp::name, logcumsumexp::overload_name)
      .typed<logcumsumexp::schema>();
}

// aten::logcumsumexp(Tensor self, int dim) -> Tensor
at::Tensor logcumsumexp::call(const at::Tensor & self, int64_t dim) {
    
    static auto op = create_logcumsumexp_typed_handle();
    return op.call(self, dim);
}

// aten::logcumsumexp(Tensor self, int dim) -> Tensor
at::Tensor logcumsumexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    
    static auto op = create_logcumsumexp_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_out, name, "aten::logcumsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_out, schema_str, "logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logcumsumexp_out::schema> create_logcumsumexp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logcumsumexp_out::name, logcumsumexp_out::overload_name)
      .typed<logcumsumexp_out::schema>();
}

// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logcumsumexp_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {
    
    static auto op = create_logcumsumexp_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logcumsumexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
    
    static auto op = create_logcumsumexp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_dimname, name, "aten::logcumsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_dimname, schema_str, "logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor")

// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logcumsumexp_dimname::schema> create_logcumsumexp_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logcumsumexp_dimname::name, logcumsumexp_dimname::overload_name)
      .typed<logcumsumexp_dimname::schema>();
}

// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
at::Tensor logcumsumexp_dimname::call(const at::Tensor & self, at::Dimname dim) {
    
    static auto op = create_logcumsumexp_dimname_typed_handle();
    return op.call(self, dim);
}

// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
at::Tensor logcumsumexp_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    
    static auto op = create_logcumsumexp_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_dimname_out, name, "aten::logcumsumexp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_dimname_out, overload_name, "dimname_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(logcumsumexp_dimname_out, schema_str, "logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logcumsumexp_dimname_out::schema> create_logcumsumexp_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logcumsumexp_dimname_out::name, logcumsumexp_dimname_out::overload_name)
      .typed<logcumsumexp_dimname_out::schema>();
}

// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logcumsumexp_dimname_out::call(const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
    
    static auto op = create_logcumsumexp_dimname_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logcumsumexp_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
    
    static auto op = create_logcumsumexp_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}
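// logcumsumexp completes the same overload matrix seen for kthvalue above:
// {functional, out} x {int64_t dim, at::Dimname dim}, where the Dimname
// overloads select a named dimension instead of a positional index.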

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_exp_backward, name, "aten::matrix_exp_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_exp_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(matrix_exp_backward, schema_str, "matrix_exp_backward(Tensor self, Tensor grad) -> Tensor")

// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<matrix_exp_backward::schema> create_matrix_exp_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matrix_exp_backward::name, matrix_exp_backward::overload_name)
      .typed<matrix_exp_backward::schema>();
}

// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
at::Tensor matrix_exp_backward::call(const at::Tensor & self, const at::Tensor & grad) {
    
    static auto op = create_matrix_exp_backward_typed_handle();
    return op.call(self, grad);
}

// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
at::Tensor matrix_exp_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad) {
    
    static auto op = create_matrix_exp_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amax, name, "aten::amax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amax, schema_str, "amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor")

// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<amax::schema> create_amax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(amax::name, amax::overload_name)
      .typed<amax::schema>();
}

// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
at::Tensor amax::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    
    static auto op = create_amax_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
at::Tensor amax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    
    static auto op = create_amax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amax_out, name, "aten::amax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amax_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amax_out, schema_str, "amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<amax_out::schema> create_amax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(amax_out::name, amax_out::overload_name)
      .typed<amax_out::schema>();
}

// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & amax_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_amax_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & amax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_amax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}
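// The schema's `int[1] dim=[]` lowers to at::IntArrayRef in C++; the
// defaulted empty list conventionally means "reduce over all dimensions"
// for these reductions.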

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_max_pool2d, name, "aten::_mps_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_max_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_max_pool2d, schema_str, "_mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::_mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_mps_max_pool2d::schema> create__mps_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mps_max_pool2d::name, _mps_max_pool2d::overload_name)
      .typed<_mps_max_pool2d::schema>();
}

// aten::_mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor _mps_max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create__mps_max_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::_mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor _mps_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create__mps_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d, name, "aten::mkldnn_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d, schema_str, "mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool2d::schema> create_mkldnn_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool2d::name, mkldnn_max_pool2d::overload_name)
      .typed<mkldnn_max_pool2d::schema>();
}

// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create_mkldnn_max_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create_mkldnn_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool2d, name, "aten::quantized_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool2d, schema_str, "quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor")

// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool2d::schema> create_quantized_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool2d::name, quantized_max_pool2d::overload_name)
      .typed<quantized_max_pool2d::schema>();
}

// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create_quantized_max_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create_quantized_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}
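// _mps_max_pool2d, mkldnn_max_pool2d, and quantized_max_pool2d share an
// argument list but are separate backend-specific operators (MPS, MKL-DNN,
// and quantized tensors, respectively), each registered under its own schema
// name with an empty overload_name rather than as overloads of one operator.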
3873
3874STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amin, name, "aten::amin")
3875STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amin, overload_name, "")
3876STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amin, schema_str, "amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor")
3877
3878// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
3879static C10_NOINLINE c10::TypedOperatorHandle<amin::schema> create_amin_typed_handle() {
3880 return c10::Dispatcher::singleton()
3881 .findSchemaOrThrow(amin::name, amin::overload_name)
3882 .typed<amin::schema>();
3883}
3884
3885// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
3886at::Tensor amin::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
3887
3888 static auto op = create_amin_typed_handle();
3889 return op.call(self, dim, keepdim);
3890}
3891
3892// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
3893at::Tensor amin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
3894
3895 static auto op = create_amin_typed_handle();
3896 return op.redispatch(dispatchKeySet, self, dim, keepdim);
3897}
3898
3899STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amin_out, name, "aten::amin")
3900STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amin_out, overload_name, "out")
3901STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(amin_out, schema_str, "amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
3902
3903// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
3904static C10_NOINLINE c10::TypedOperatorHandle<amin_out::schema> create_amin_out_typed_handle() {
3905 return c10::Dispatcher::singleton()
3906 .findSchemaOrThrow(amin_out::name, amin_out::overload_name)
3907 .typed<amin_out::schema>();
3908}
3909
3910// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
3911at::Tensor & amin_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
3912
3913 static auto op = create_amin_out_typed_handle();
3914 return op.call(self, dim, keepdim, out);
3915}
3916
3917// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
3918at::Tensor & amin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
3919
3920 static auto op = create_amin_out_typed_handle();
3921 return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
3922}
3923
3924STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution, name, "aten::_mps_convolution")
3925STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution, overload_name, "")
3926STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution, schema_str, "_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor")
3927
3928// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
3929static C10_NOINLINE c10::TypedOperatorHandle<_mps_convolution::schema> create__mps_convolution_typed_handle() {
3930 return c10::Dispatcher::singleton()
3931 .findSchemaOrThrow(_mps_convolution::name, _mps_convolution::overload_name)
3932 .typed<_mps_convolution::schema>();
3933}
3934
3935// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
3936at::Tensor _mps_convolution::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
3937
3938 static auto op = create__mps_convolution_typed_handle();
3939 return op.call(self, weight, bias, padding, stride, dilation, groups);
3940}
3941
3942// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
3943at::Tensor _mps_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
3944
3945 static auto op = create__mps_convolution_typed_handle();
3946 return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups);
3947}
3948
3949STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_rnn_layer_backward, name, "aten::mkldnn_rnn_layer_backward")
3950STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_rnn_layer_backward, overload_name, "")
3951STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_rnn_layer_backward, schema_str, "mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)")
3952
3953// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
3954static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_rnn_layer_backward::schema> create_mkldnn_rnn_layer_backward_typed_handle() {
3955 return c10::Dispatcher::singleton()
3956 .findSchemaOrThrow(mkldnn_rnn_layer_backward::name, mkldnn_rnn_layer_backward::overload_name)
3957 .typed<mkldnn_rnn_layer_backward::schema>();
3958}
3959
// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward::call(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {

    static auto op = create_mkldnn_rnn_layer_backward_typed_handle();
    return op.call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
}

// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {

    static auto op = create_mkldnn_rnn_layer_backward_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_depthwise_convolution, name, "aten::miopen_depthwise_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_depthwise_convolution, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_depthwise_convolution, schema_str, "miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor")

// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_depthwise_convolution::schema> create_miopen_depthwise_convolution_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_depthwise_convolution::name, miopen_depthwise_convolution::overload_name)
      .typed<miopen_depthwise_convolution::schema>();
}

// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_depthwise_convolution::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {

    static auto op = create_miopen_depthwise_convolution_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_depthwise_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {

    static auto op = create_miopen_depthwise_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}

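// ---------------------------------------------------------------------------
// Editorial note (not emitted by torchgen): every operator entry in this file
// follows the same three-part pattern -- a C10_NOINLINE factory that resolves
// the operator's schema against the singleton c10::Dispatcher, a call() that
// caches the resulting TypedOperatorHandle in a function-local static (so the
// schema lookup runs once per process), and a redispatch() that takes an
// explicit DispatchKeySet so a kernel can forward "below" itself in the
// dispatch order. A minimal sketch of the same idea, using a hypothetical
// operator "my_op" purely for illustration:
//
//   static c10::TypedOperatorHandle<my_op::schema> create_my_op_handle() {
//     return c10::Dispatcher::singleton()
//         .findSchemaOrThrow("aten::my_op", /*overload_name=*/"")
//         .typed<my_op::schema>();
//   }
//   at::Tensor my_op_call(const at::Tensor & self) {
//     static auto op = create_my_op_handle();  // cached after first use
//     return op.call(self);                    // dispatch on self's keys
//   }
// ---------------------------------------------------------------------------
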
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm, name, "aten::native_batch_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm, schema_str, "native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)")

// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_batch_norm::schema> create_native_batch_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_batch_norm::name, native_batch_norm::overload_name)
      .typed<native_batch_norm::schema>();
}

// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {

    static auto op = create_native_batch_norm_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {

    static auto op = create_native_batch_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_out, name, "aten::native_batch_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_out, schema_str, "native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<native_batch_norm_out::schema> create_native_batch_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_batch_norm_out::name, native_batch_norm_out::overload_name)
      .typed<native_batch_norm_out::schema>();
}

// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {

    static auto op = create_native_batch_norm_out_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}

// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {

    static auto op = create_native_batch_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}

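// Editorial note: the functional overload above returns freshly allocated
// (output, save_mean, save_invstd), while the .out overload writes into
// caller-provided tensors and returns references to them, as the (a!)/(b!)/(c!)
// alias annotations indicate. A hedged sketch of the functional form through
// the public ATen API (shapes and hyperparameters are illustrative only):
//
//   at::Tensor x = at::randn({8, 4, 16, 16});
//   auto [y, save_mean, save_invstd] = at::native_batch_norm(
//       x, /*weight=*/{}, /*bias=*/{}, /*running_mean=*/{}, /*running_var=*/{},
//       /*training=*/true, /*momentum=*/0.1, /*eps=*/1e-5);
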
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_stats, name, "aten::batch_norm_stats")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_stats, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_stats, schema_str, "batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)")

// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_stats::schema> create_batch_norm_stats_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_stats::name, batch_norm_stats::overload_name)
      .typed<batch_norm_stats::schema>();
}

// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> batch_norm_stats::call(const at::Tensor & input, double eps) {

    static auto op = create_batch_norm_stats_typed_handle();
    return op.call(input, eps);
}

// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> batch_norm_stats::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps) {

    static auto op = create_batch_norm_stats_typed_handle();
    return op.redispatch(dispatchKeySet, input, eps);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats, name, "aten::batch_norm_gather_stats")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats, schema_str, "batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)")

// aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_gather_stats::schema> create_batch_norm_gather_stats_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_gather_stats::name, batch_norm_gather_stats::overload_name)
      .typed<batch_norm_gather_stats::schema>();
}

// aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats::call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {

    static auto op = create_batch_norm_gather_stats_typed_handle();
    return op.call(input, mean, invstd, running_mean, running_var, momentum, eps, count);
}

// aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {

    static auto op = create_batch_norm_gather_stats_typed_handle();
    return op.redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_backward, name, "aten::native_batch_norm_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_backward, schema_str, "native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)")

// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_batch_norm_backward::schema> create_native_batch_norm_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_batch_norm_backward::name, native_batch_norm_backward::overload_name)
      .typed<native_batch_norm_backward::schema>();
}

// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward::call(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {

    static auto op = create_native_batch_norm_backward_typed_handle();
    return op.call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
}

// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {

    static auto op = create_native_batch_norm_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_reduce, name, "aten::batch_norm_backward_reduce")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_reduce, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_reduce, schema_str, "batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)")

// aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward_reduce::schema> create_batch_norm_backward_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_backward_reduce::name, batch_norm_backward_reduce::overload_name)
      .typed<batch_norm_backward_reduce::schema>();
}

// aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {

    static auto op = create_batch_norm_backward_reduce_typed_handle();
    return op.call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
}

// aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {

    static auto op = create_batch_norm_backward_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
}

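// Editorial note: reading the schemas above, batch_norm_stats produces a
// per-device (mean, invstd) pair, batch_norm_gather_stats combines such pairs
// across devices given the per-device element count (the building block used
// by synchronized batch norm), and batch_norm_backward_reduce computes the
// reduction terms (sum_dy, sum_dy_xmu, grad_weight, grad_bias) consumed by the
// distributed backward pass. This reading is inferred from the signatures,
// not from documentation in this generated file.
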
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_vulkan_available, name, "aten::is_vulkan_available")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_vulkan_available, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(is_vulkan_available, schema_str, "is_vulkan_available() -> bool")

// aten::is_vulkan_available() -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_vulkan_available::schema> create_is_vulkan_available_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_vulkan_available::name, is_vulkan_available::overload_name)
      .typed<is_vulkan_available::schema>();
}

// aten::is_vulkan_available() -> bool
bool is_vulkan_available::call() {

    static auto op = create_is_vulkan_available_typed_handle();
    return op.call();
}

// aten::is_vulkan_available() -> bool
bool is_vulkan_available::redispatch(c10::DispatchKeySet dispatchKeySet) {

    static auto op = create_is_vulkan_available_typed_handle();
    return op.redispatch(dispatchKeySet);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnpack_spatial_convolution, name, "aten::_nnpack_spatial_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnpack_spatial_convolution, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnpack_spatial_convolution, schema_str, "_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor")

// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nnpack_spatial_convolution::schema> create__nnpack_spatial_convolution_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nnpack_spatial_convolution::name, _nnpack_spatial_convolution::overload_name)
      .typed<_nnpack_spatial_convolution::schema>();
}

// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor
at::Tensor _nnpack_spatial_convolution::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride) {

    static auto op = create__nnpack_spatial_convolution_typed_handle();
    return op.call(input, weight, bias, padding, stride);
}

// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor
at::Tensor _nnpack_spatial_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride) {

    static auto op = create__nnpack_spatial_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, padding, stride);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_names, name, "aten::ones")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_names, overload_name, "names")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_names, schema_str, "ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ones_names::schema> create_ones_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones_names::name, ones_names::overload_name)
      .typed<ones_names::schema>();
}

// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor ones_names::call(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_ones_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory);
}

// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor ones_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_ones_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones, name, "aten::ones")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones, schema_str, "ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ones::schema> create_ones_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones::name, ones::overload_name)
      .typed<ones::schema>();
}

// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor ones::call(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_ones_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor ones::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_ones_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_out, name, "aten::ones")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_out, schema_str, "ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ones_out::schema> create_ones_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones_out::name, ones_out::overload_name)
      .typed<ones_out::schema>();
}

// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_out::call(c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create_ones_out_typed_handle();
    return op.call(size, out);
}

// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create_ones_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}

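// Editorial note: a short usage sketch for the three ones overloads above, via
// the public at:: wrappers that ultimately land here (values illustrative):
//
//   at::Tensor a = at::ones({2, 3});               // aten::ones, default dtype
//   at::Tensor b = at::ones({2, 3}, at::kDouble);  // dtype via TensorOptions
//   at::Tensor c = at::empty({2, 3});
//   at::ones_out(c, {2, 3});                       // aten::ones.out writes into c
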
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cdist_forward, name, "aten::_cdist_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cdist_forward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cdist_forward, schema_str, "_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor")

// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cdist_forward::schema> create__cdist_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cdist_forward::name, _cdist_forward::overload_name)
      .typed<_cdist_forward::schema>();
}

// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
at::Tensor _cdist_forward::call(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {

    static auto op = create__cdist_forward_typed_handle();
    return op.call(x1, x2, p, compute_mode);
}

// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
at::Tensor _cdist_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {

    static auto op = create__cdist_forward_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, p, compute_mode);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosine_similarity, name, "aten::cosine_similarity")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosine_similarity, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cosine_similarity, schema_str, "cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor")

// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cosine_similarity::schema> create_cosine_similarity_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cosine_similarity::name, cosine_similarity::overload_name)
      .typed<cosine_similarity::schema>();
}

// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
at::Tensor cosine_similarity::call(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {

    static auto op = create_cosine_similarity_typed_handle();
    return op.call(x1, x2, dim, eps);
}

// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
at::Tensor cosine_similarity::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {

    static auto op = create_cosine_similarity_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, dim, eps);
}

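// Editorial note: per the documented PyTorch semantics, cosine_similarity
// computes, along `dim`,
//
//   cos(x1, x2) = <x1, x2> / max(||x1||_2 * ||x2||_2, eps)
//
// with `eps` (default 1e-08 in the schema above) guarding against division by
// zero. The exact clamping detail is restated here from the public docs, not
// from anything in this generated file.
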
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(movedim_intlist, name, "aten::movedim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(movedim_intlist, overload_name, "intlist")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(movedim_intlist, schema_str, "movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)")

// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<movedim_intlist::schema> create_movedim_intlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(movedim_intlist::name, movedim_intlist::overload_name)
      .typed<movedim_intlist::schema>();
}

// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
at::Tensor movedim_intlist::call(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {

    static auto op = create_movedim_intlist_typed_handle();
    return op.call(self, source, destination);
}

// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
at::Tensor movedim_intlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {

    static auto op = create_movedim_intlist_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, destination);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(movedim_int, name, "aten::movedim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(movedim_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(movedim_int, schema_str, "movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)")

// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<movedim_int::schema> create_movedim_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(movedim_int::name, movedim_int::overload_name)
      .typed<movedim_int::schema>();
}

// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
at::Tensor movedim_int::call(const at::Tensor & self, int64_t source, int64_t destination) {

    static auto op = create_movedim_int_typed_handle();
    return op.call(self, source, destination);
}

// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
at::Tensor movedim_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination) {

    static auto op = create_movedim_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, destination);
}

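// Editorial note: both movedim overloads return a view of `self`, as the
// Tensor(a) alias annotation indicates. A small sketch of the .int overload
// (shapes illustrative):
//
//   at::Tensor t = at::zeros({2, 3, 4});
//   at::Tensor u = at::movedim(t, /*source=*/0, /*destination=*/2);
//   // u.sizes() == {3, 4, 2}; u shares storage with t.
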
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(numpy_T, name, "aten::numpy_T")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(numpy_T, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(numpy_T, schema_str, "numpy_T(Tensor(a) self) -> Tensor(a)")

// aten::numpy_T(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<numpy_T::schema> create_numpy_T_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(numpy_T::name, numpy_T::overload_name)
      .typed<numpy_T::schema>();
}

// aten::numpy_T(Tensor(a) self) -> Tensor(a)
at::Tensor numpy_T::call(const at::Tensor & self) {

    static auto op = create_numpy_T_typed_handle();
    return op.call(self);
}

// aten::numpy_T(Tensor(a) self) -> Tensor(a)
at::Tensor numpy_T::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_numpy_T_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mH, name, "aten::mH")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mH, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mH, schema_str, "mH(Tensor(a) self) -> Tensor(a)")

// aten::mH(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<mH::schema> create_mH_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mH::name, mH::overload_name)
      .typed<mH::schema>();
}

// aten::mH(Tensor(a) self) -> Tensor(a)
at::Tensor mH::call(const at::Tensor & self) {

    static auto op = create_mH_typed_handle();
    return op.call(self);
}

// aten::mH(Tensor(a) self) -> Tensor(a)
at::Tensor mH::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_mH_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rand_like, name, "aten::rand_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rand_like, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rand_like, schema_str, "rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")

// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rand_like::schema> create_rand_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_like::name, rand_like::overload_name)
      .typed<rand_like::schema>();
}

// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor rand_like::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_rand_like_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor rand_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_rand_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like, name, "aten::randint_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like, schema_str, "randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")

// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randint_like::schema> create_randint_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_like::name, randint_like::overload_name)
      .typed<randint_like::schema>();
}

// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor randint_like::call(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_randint_like_typed_handle();
    return op.call(self, high, dtype, layout, device, pin_memory, memory_format);
}

// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor randint_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_randint_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, high, dtype, layout, device, pin_memory, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_low_dtype, name, "aten::randint_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_low_dtype, overload_name, "low_dtype")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_low_dtype, schema_str, "randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")

// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randint_like_low_dtype::schema> create_randint_like_low_dtype_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_like_low_dtype::name, randint_like_low_dtype::overload_name)
      .typed<randint_like_low_dtype::schema>();
}

// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor randint_like_low_dtype::call(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_randint_like_low_dtype_typed_handle();
    return op.call(self, low, high, dtype, layout, device, pin_memory, memory_format);
}

// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor randint_like_low_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_randint_like_low_dtype_typed_handle();
    return op.redispatch(dispatchKeySet, self, low, high, dtype, layout, device, pin_memory, memory_format);
}

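// Editorial note: as with at::randint, `high` is exclusive and `low` (in the
// low_dtype overload) is inclusive, so values are drawn from [low, high).
// Illustrative sketch via the public at:: wrappers:
//
//   at::Tensor src = at::empty({4}, at::kLong);
//   at::Tensor r = at::randint_like(src, /*high=*/10);              // [0, 10)
//   at::Tensor s = at::randint_like(src, /*low=*/5, /*high=*/10);   // [5, 10)
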
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round, name, "aten::round")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round, schema_str, "round(Tensor self) -> Tensor")

// aten::round(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<round::schema> create_round_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round::name, round::overload_name)
      .typed<round::schema>();
}

// aten::round(Tensor self) -> Tensor
at::Tensor round::call(const at::Tensor & self) {

    static auto op = create_round_typed_handle();
    return op.call(self);
}

// aten::round(Tensor self) -> Tensor
at::Tensor round::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_round_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_, name, "aten::round_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_, schema_str, "round_(Tensor(a!) self) -> Tensor(a!)")

// aten::round_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<round_::schema> create_round__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round_::name, round_::overload_name)
      .typed<round_::schema>();
}

// aten::round_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & round_::call(at::Tensor & self) {

    static auto op = create_round__typed_handle();
    return op.call(self);
}

// aten::round_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & round_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_round__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_out, name, "aten::round")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_out, schema_str, "round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<round_out::schema> create_round_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round_out::name, round_out::overload_name)
      .typed<round_out::schema>();
}

// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & round_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_round_out_typed_handle();
    return op.call(self, out);
}

// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & round_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_round_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_decimals, name, "aten::round")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_decimals, overload_name, "decimals")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_decimals, schema_str, "round.decimals(Tensor self, *, int decimals) -> Tensor")

// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<round_decimals::schema> create_round_decimals_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round_decimals::name, round_decimals::overload_name)
      .typed<round_decimals::schema>();
}

// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
at::Tensor round_decimals::call(const at::Tensor & self, int64_t decimals) {

    static auto op = create_round_decimals_typed_handle();
    return op.call(self, decimals);
}

// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
at::Tensor round_decimals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals) {

    static auto op = create_round_decimals_typed_handle();
    return op.redispatch(dispatchKeySet, self, decimals);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round__decimals, name, "aten::round_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round__decimals, overload_name, "decimals")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round__decimals, schema_str, "round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)")

// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<round__decimals::schema> create_round__decimals_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round__decimals::name, round__decimals::overload_name)
      .typed<round__decimals::schema>();
}

// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
at::Tensor & round__decimals::call(at::Tensor & self, int64_t decimals) {

    static auto op = create_round__decimals_typed_handle();
    return op.call(self, decimals);
}

// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
at::Tensor & round__decimals::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t decimals) {

    static auto op = create_round__decimals_typed_handle();
    return op.redispatch(dispatchKeySet, self, decimals);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_decimals_out, name, "aten::round")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_decimals_out, overload_name, "decimals_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(round_decimals_out, schema_str, "round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)")

// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<round_decimals_out::schema> create_round_decimals_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round_decimals_out::name, round_decimals_out::overload_name)
      .typed<round_decimals_out::schema>();
}

// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
at::Tensor & round_decimals_out::call(const at::Tensor & self, int64_t decimals, at::Tensor & out) {

    static auto op = create_round_decimals_out_typed_handle();
    return op.call(self, decimals, out);
}

// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
at::Tensor & round_decimals_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) {

    static auto op = create_round_decimals_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, decimals, out);
}

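// Editorial note: round.decimals rounds to the given number of decimal places,
// and (per the documented torch.round behavior) a negative `decimals` rounds
// to the left of the decimal point. Illustrative sketch, with approximate
// values since the results pass through floating point:
//
//   at::Tensor x = at::full({1}, 1234.5678);
//   at::round(x, /*decimals=*/2);    // ~1234.57
//   at::round(x, /*decimals=*/-2);   // ~1200.
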
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_out, name, "aten::gelu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_out, schema_str, "gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)")

// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gelu_out::schema> create_gelu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu_out::name, gelu_out::overload_name)
      .typed<gelu_out::schema>();
}

// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
at::Tensor & gelu_out::call(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {

    static auto op = create_gelu_out_typed_handle();
    return op.call(self, approximate, out);
}

// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
at::Tensor & gelu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {

    static auto op = create_gelu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, approximate, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_, name, "aten::gelu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu_, schema_str, "gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)")

// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gelu_::schema> create_gelu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu_::name, gelu_::overload_name)
      .typed<gelu_::schema>();
}

// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
at::Tensor & gelu_::call(at::Tensor & self, c10::string_view approximate) {

    static auto op = create_gelu__typed_handle();
    return op.call(self, approximate);
}

// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
at::Tensor & gelu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::string_view approximate) {

    static auto op = create_gelu__typed_handle();
    return op.redispatch(dispatchKeySet, self, approximate);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu, name, "aten::gelu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gelu, schema_str, "gelu(Tensor self, *, str approximate='none') -> Tensor")

// aten::gelu(Tensor self, *, str approximate='none') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gelu::schema> create_gelu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu::name, gelu::overload_name)
      .typed<gelu::schema>();
}

// aten::gelu(Tensor self, *, str approximate='none') -> Tensor
at::Tensor gelu::call(const at::Tensor & self, c10::string_view approximate) {

    static auto op = create_gelu_typed_handle();
    return op.call(self, approximate);
}

// aten::gelu(Tensor self, *, str approximate='none') -> Tensor
at::Tensor gelu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate) {

    static auto op = create_gelu_typed_handle();
    return op.redispatch(dispatchKeySet, self, approximate);
}

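// Editorial note: `approximate` selects between the exact, erf-based GELU
// ("none", the schema default) and the tanh approximation ("tanh"); those are
// the two values the documented PyTorch API accepts. Sketch:
//
//   at::Tensor x = at::randn({4});
//   at::gelu(x);                          // exact: x * Phi(x)
//   at::gelu(x, /*approximate=*/"tanh");  // tanh-based approximation
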
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_out, name, "aten::hardshrink")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink_out, schema_str, "hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardshrink_out::schema> create_hardshrink_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardshrink_out::name, hardshrink_out::overload_name)
      .typed<hardshrink_out::schema>();
}

// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardshrink_out::call(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {

    static auto op = create_hardshrink_out_typed_handle();
    return op.call(self, lambd, out);
}

// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardshrink_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {

    static auto op = create_hardshrink_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink, name, "aten::hardshrink")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardshrink, schema_str, "hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor")

// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hardshrink::schema> create_hardshrink_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardshrink::name, hardshrink::overload_name)
      .typed<hardshrink::schema>();
}

// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
at::Tensor hardshrink::call(const at::Tensor & self, const at::Scalar & lambd) {

    static auto op = create_hardshrink_typed_handle();
    return op.call(self, lambd);
}

// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
at::Tensor hardshrink::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd) {

    static auto op = create_hardshrink_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_backward, name, "aten::select_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_backward, schema_str, "select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor")

// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<select_backward::schema> create_select_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_backward::name, select_backward::overload_name)
      .typed<select_backward::schema>();
}

// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
at::Tensor select_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {

    static auto op = create_select_backward_typed_handle();
    return op.call(grad_output, input_sizes, dim, index);
}

// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
at::Tensor select_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {

    static auto op = create_select_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input_sizes, dim, index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish, name, "aten::mish")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish, schema_str, "mish(Tensor self) -> Tensor")

// aten::mish(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mish::schema> create_mish_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mish::name, mish::overload_name)
      .typed<mish::schema>();
}

// aten::mish(Tensor self) -> Tensor
at::Tensor mish::call(const at::Tensor & self) {

    static auto op = create_mish_typed_handle();
    return op.call(self);
}

// aten::mish(Tensor self) -> Tensor
at::Tensor mish::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_mish_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_, name, "aten::mish_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_, schema_str, "mish_(Tensor(a!) self) -> Tensor(a!)")

// aten::mish_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mish_::schema> create_mish__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mish_::name, mish_::overload_name)
      .typed<mish_::schema>();
}

// aten::mish_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & mish_::call(at::Tensor & self) {

    static auto op = create_mish__typed_handle();
    return op.call(self);
}

// aten::mish_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & mish_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_mish__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_out, name, "aten::mish")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mish_out, schema_str, "mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mish_out::schema> create_mish_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mish_out::name, mish_out::overload_name)
      .typed<mish_out::schema>();
}

// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mish_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_mish_out_typed_handle();
    return op.call(self, out);
}

// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mish_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_mish_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

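// Editorial note: Mish is the activation x * tanh(softplus(x)) (Misra, 2019),
// restated here from the public docs for context. The three entries above are
// the standard functional / in-place / out triple that this file emits for
// most unary ops.
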
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid, name, "aten::sigmoid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid, schema_str, "sigmoid(Tensor self) -> Tensor")

// aten::sigmoid(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid::schema> create_sigmoid_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid::name, sigmoid::overload_name)
      .typed<sigmoid::schema>();
}

// aten::sigmoid(Tensor self) -> Tensor
at::Tensor sigmoid::call(const at::Tensor & self) {

    static auto op = create_sigmoid_typed_handle();
    return op.call(self);
}

// aten::sigmoid(Tensor self) -> Tensor
at::Tensor sigmoid::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_sigmoid_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_, name, "aten::sigmoid_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_, schema_str, "sigmoid_(Tensor(a!) self) -> Tensor(a!)")

// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid_::schema> create_sigmoid__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid_::name, sigmoid_::overload_name)
      .typed<sigmoid_::schema>();
}

// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sigmoid_::call(at::Tensor & self) {

    static auto op = create_sigmoid__typed_handle();
    return op.call(self);
}

// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sigmoid_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_sigmoid__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_out, name, "aten::sigmoid")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sigmoid_out, schema_str, "sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid_out::schema> create_sigmoid_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid_out::name, sigmoid_out::overload_name)
      .typed<sigmoid_out::schema>();
}

// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sigmoid_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sigmoid_out_typed_handle();
    return op.call(self, out);
}

// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sigmoid_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_sigmoid_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach, name, "aten::detach")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach, schema_str, "detach(Tensor(a) self) -> Tensor(a)")

// aten::detach(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<detach::schema> create_detach_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(detach::name, detach::overload_name)
      .typed<detach::schema>();
}

// aten::detach(Tensor(a) self) -> Tensor(a)
at::Tensor detach::call(const at::Tensor & self) {

    static auto op = create_detach_typed_handle();
    return op.call(self);
}

// aten::detach(Tensor(a) self) -> Tensor(a)
at::Tensor detach::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_detach_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

4974STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_, name, "aten::detach_")
4975STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_, overload_name, "")
4976STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_, schema_str, "detach_(Tensor(a!) self) -> Tensor(a!)")
4977
4978// aten::detach_(Tensor(a!) self) -> Tensor(a!)
4979static C10_NOINLINE c10::TypedOperatorHandle<detach_::schema> create_detach__typed_handle() {
4980 return c10::Dispatcher::singleton()
4981 .findSchemaOrThrow(detach_::name, detach_::overload_name)
4982 .typed<detach_::schema>();
4983}
4984
4985// aten::detach_(Tensor(a!) self) -> Tensor(a!)
4986at::Tensor & detach_::call(at::Tensor & self) {
4987
4988 static auto op = create_detach__typed_handle();
4989 return op.call(self);
4990}
4991
4992// aten::detach_(Tensor(a!) self) -> Tensor(a!)
4993at::Tensor & detach_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
4994
4995 static auto op = create_detach__typed_handle();
4996 return op.redispatch(dispatchKeySet, self);
4997}
4998
4999STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(size_int, name, "aten::size")
5000STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(size_int, overload_name, "int")
5001STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(size_int, schema_str, "size.int(Tensor self, int dim) -> int")
5002
5003// aten::size.int(Tensor self, int dim) -> int
5004static C10_NOINLINE c10::TypedOperatorHandle<size_int::schema> create_size_int_typed_handle() {
5005 return c10::Dispatcher::singleton()
5006 .findSchemaOrThrow(size_int::name, size_int::overload_name)
5007 .typed<size_int::schema>();
5008}
5009
5010// aten::size.int(Tensor self, int dim) -> int
5011int64_t size_int::call(const at::Tensor & self, int64_t dim) {
5012
5013 static auto op = create_size_int_typed_handle();
5014 return op.call(self, dim);
5015}
5016
5017// aten::size.int(Tensor self, int dim) -> int
5018int64_t size_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
5019
5020 static auto op = create_size_int_typed_handle();
5021 return op.redispatch(dispatchKeySet, self, dim);
5022}
5023
5024STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(size_Dimname, name, "aten::size")
5025STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(size_Dimname, overload_name, "Dimname")
5026STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(size_Dimname, schema_str, "size.Dimname(Tensor self, Dimname dim) -> int")
5027
5028// aten::size.Dimname(Tensor self, Dimname dim) -> int
5029static C10_NOINLINE c10::TypedOperatorHandle<size_Dimname::schema> create_size_Dimname_typed_handle() {
5030 return c10::Dispatcher::singleton()
5031 .findSchemaOrThrow(size_Dimname::name, size_Dimname::overload_name)
5032 .typed<size_Dimname::schema>();
5033}
5034
5035// aten::size.Dimname(Tensor self, Dimname dim) -> int
5036int64_t size_Dimname::call(const at::Tensor & self, at::Dimname dim) {
5037
5038 static auto op = create_size_Dimname_typed_handle();
5039 return op.call(self, dim);
5040}
5041
5042// aten::size.Dimname(Tensor self, Dimname dim) -> int
5043int64_t size_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
5044
5045 static auto op = create_size_Dimname_typed_handle();
5046 return op.redispatch(dispatchKeySet, self, dim);
5047}
5048
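// Usage sketch (illustrative): both size overloads return a plain int64_t, so
// no Tensor is materialized when querying a dimension.
//   at::Tensor t = at::zeros({2, 3});
//   int64_t n = t.size(1);                  // size.int -> 3
//   // size.Dimname takes an at::Dimname for named-tensor dimensions
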
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_scatter, name, "aten::slice_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_scatter, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_scatter, schema_str, "slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor")

// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slice_scatter::schema> create_slice_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_scatter::name, slice_scatter::overload_name)
      .typed<slice_scatter::schema>();
}

// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
at::Tensor slice_scatter::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {

    static auto op = create_slice_scatter_typed_handle();
    return op.call(self, src, dim, start, end, step);
}

// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
at::Tensor slice_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {

    static auto op = create_slice_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, dim, start, end, step);
}

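// Usage sketch (illustrative; start/end are optional and may be passed as
// c10::nullopt to mean "from the beginning" / "to the end"):
//   at::Tensor base = at::zeros({8});
//   at::Tensor src  = at::ones({4});
//   // writes src into the slice base[0:8:2] and returns the result
//   at::Tensor y = at::slice_scatter(base, src, /*dim=*/0,
//                                    /*start=*/0, /*end=*/8, /*step=*/2);
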
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_backward_data, name, "aten::_softmax_backward_data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_backward_data, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_backward_data, schema_str, "_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor")

// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_softmax_backward_data::schema> create__softmax_backward_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_softmax_backward_data::name, _softmax_backward_data::overload_name)
      .typed<_softmax_backward_data::schema>();
}

// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
at::Tensor _softmax_backward_data::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {

    static auto op = create__softmax_backward_data_typed_handle();
    return op.call(grad_output, output, dim, input_dtype);
}

// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
at::Tensor _softmax_backward_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {

    static auto op = create__softmax_backward_data_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, input_dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_backward_data_out, name, "aten::_softmax_backward_data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_backward_data_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_softmax_backward_data_out, schema_str, "_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_softmax_backward_data_out::schema> create__softmax_backward_data_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_softmax_backward_data_out::name, _softmax_backward_data_out::overload_name)
      .typed<_softmax_backward_data_out::schema>();
}

// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _softmax_backward_data_out::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {

    static auto op = create__softmax_backward_data_out_typed_handle();
    return op.call(grad_output, output, dim, input_dtype, grad_input);
}

// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _softmax_backward_data_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {

    static auto op = create__softmax_backward_data_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes, name, "aten::split_with_sizes")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(split_with_sizes, schema_str, "split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]")

// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<split_with_sizes::schema> create_split_with_sizes_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_with_sizes::name, split_with_sizes::overload_name)
      .typed<split_with_sizes::schema>();
}

// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_with_sizes::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {

    static auto op = create_split_with_sizes_typed_handle();
    return op.call(self, split_sizes, dim);
}

// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_with_sizes::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {

    static auto op = create_split_with_sizes_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_sizes, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hsplit_int, name, "aten::hsplit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hsplit_int, overload_name, "int")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hsplit_int, schema_str, "hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]")

// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<hsplit_int::schema> create_hsplit_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hsplit_int::name, hsplit_int::overload_name)
      .typed<hsplit_int::schema>();
}

// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> hsplit_int::call(const at::Tensor & self, int64_t sections) {

    static auto op = create_hsplit_int_typed_handle();
    return op.call(self, sections);
}

// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> hsplit_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {

    static auto op = create_hsplit_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, sections);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hsplit_array, name, "aten::hsplit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hsplit_array, overload_name, "array")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hsplit_array, schema_str, "hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]")

// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<hsplit_array::schema> create_hsplit_array_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hsplit_array::name, hsplit_array::overload_name)
      .typed<hsplit_array::schema>();
}

// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> hsplit_array::call(const at::Tensor & self, at::IntArrayRef indices) {

    static auto op = create_hsplit_array_typed_handle();
    return op.call(self, indices);
}

// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> hsplit_array::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {

    static auto op = create_hsplit_array_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices);
}

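// Usage sketch (illustrative): all three split variants return
// ::std::vector<at::Tensor> whose elements are views of the input.
//   at::Tensor t = at::arange(6);
//   auto parts  = at::split_with_sizes(t, /*split_sizes=*/{2, 4});
//   at::Tensor m = at::zeros({4, 4});
//   auto halves = at::hsplit(m, /*sections=*/2);      // hsplit.int
//   auto pieces = at::hsplit(m, /*indices=*/{1, 3});  // hsplit.array
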
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stack, name, "aten::stack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stack, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stack, schema_str, "stack(Tensor[] tensors, int dim=0) -> Tensor")

// aten::stack(Tensor[] tensors, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<stack::schema> create_stack_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(stack::name, stack::overload_name)
      .typed<stack::schema>();
}

// aten::stack(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor stack::call(at::TensorList tensors, int64_t dim) {

    static auto op = create_stack_typed_handle();
    return op.call(tensors, dim);
}

// aten::stack(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor stack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim) {

    static auto op = create_stack_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stack_out, name, "aten::stack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stack_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(stack_out, schema_str, "stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<stack_out::schema> create_stack_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(stack_out::name, stack_out::overload_name)
      .typed<stack_out::schema>();
}

// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & stack_out::call(at::TensorList tensors, int64_t dim, at::Tensor & out) {

    static auto op = create_stack_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & stack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {

    static auto op = create_stack_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

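// Usage sketch (illustrative): stack takes an at::TensorList (a non-owning
// view over a sequence of tensors) and creates a new leading dimension.
//   at::Tensor a = at::zeros({3});
//   at::Tensor b = at::ones({3});
//   at::Tensor s = at::stack({a, b});           // shape [2, 3]
//   at::Tensor out = at::empty({2, 3});
//   at::stack_out(out, {a, b}, /*dim=*/0);      // writes into `out`
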
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_stack, name, "aten::_stack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_stack, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_stack, schema_str, "_stack(Tensor[] tensors, int dim=0) -> Tensor")

// aten::_stack(Tensor[] tensors, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_stack::schema> create__stack_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_stack::name, _stack::overload_name)
      .typed<_stack::schema>();
}

// aten::_stack(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor _stack::call(at::TensorList tensors, int64_t dim) {

    static auto op = create__stack_typed_handle();
    return op.call(tensors, dim);
}

// aten::_stack(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor _stack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim) {

    static auto op = create__stack_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_stack_out, name, "aten::_stack")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_stack_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_stack_out, schema_str, "_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_stack_out::schema> create__stack_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_stack_out::name, _stack_out::overload_name)
      .typed<_stack_out::schema>();
}

// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _stack_out::call(at::TensorList tensors, int64_t dim, at::Tensor & out) {

    static auto op = create__stack_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _stack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {

    static auto op = create__stack_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square, name, "aten::square")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square, schema_str, "square(Tensor self) -> Tensor")

// aten::square(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<square::schema> create_square_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(square::name, square::overload_name)
      .typed<square::schema>();
}

// aten::square(Tensor self) -> Tensor
at::Tensor square::call(const at::Tensor & self) {

    static auto op = create_square_typed_handle();
    return op.call(self);
}

// aten::square(Tensor self) -> Tensor
at::Tensor square::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_square_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square_, name, "aten::square_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square_, schema_str, "square_(Tensor(a!) self) -> Tensor(a!)")

// aten::square_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<square_::schema> create_square__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(square_::name, square_::overload_name)
      .typed<square_::schema>();
}

// aten::square_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & square_::call(at::Tensor & self) {

    static auto op = create_square__typed_handle();
    return op.call(self);
}

// aten::square_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & square_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_square__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square_out, name, "aten::square")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(square_out, schema_str, "square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<square_out::schema> create_square_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(square_out::name, square_out::overload_name)
      .typed<square_out::schema>();
}

// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & square_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_square_out_typed_handle();
    return op.call(self, out);
}

// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & square_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_square_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh, name, "aten::tanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh, schema_str, "tanh(Tensor self) -> Tensor")

// aten::tanh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tanh::schema> create_tanh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh::name, tanh::overload_name)
      .typed<tanh::schema>();
}

// aten::tanh(Tensor self) -> Tensor
at::Tensor tanh::call(const at::Tensor & self) {

    static auto op = create_tanh_typed_handle();
    return op.call(self);
}

// aten::tanh(Tensor self) -> Tensor
at::Tensor tanh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_tanh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_, name, "aten::tanh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_, schema_str, "tanh_(Tensor(a!) self) -> Tensor(a!)")

// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tanh_::schema> create_tanh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh_::name, tanh_::overload_name)
      .typed<tanh_::schema>();
}

// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & tanh_::call(at::Tensor & self) {

    static auto op = create_tanh__typed_handle();
    return op.call(self);
}

// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & tanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_tanh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_out, name, "aten::tanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tanh_out, schema_str, "tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tanh_out::schema> create_tanh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh_out::name, tanh_out::overload_name)
      .typed<tanh_out::schema>();
}

// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tanh_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_tanh_out_typed_handle();
    return op.call(self, out);
}

// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_tanh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tensordot, name, "aten::tensordot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tensordot, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tensordot, schema_str, "tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor")

// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tensordot::schema> create_tensordot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tensordot::name, tensordot::overload_name)
      .typed<tensordot::schema>();
}

// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
at::Tensor tensordot::call(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {

    static auto op = create_tensordot_typed_handle();
    return op.call(self, other, dims_self, dims_other);
}

// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
at::Tensor tensordot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {

    static auto op = create_tensordot_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dims_self, dims_other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tensordot_out, name, "aten::tensordot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tensordot_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tensordot_out, schema_str, "tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tensordot_out::schema> create_tensordot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tensordot_out::name, tensordot_out::overload_name)
      .typed<tensordot_out::schema>();
}

// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tensordot_out::call(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {

    static auto op = create_tensordot_out_typed_handle();
    return op.call(self, other, dims_self, dims_other, out);
}

// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tensordot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {

    static auto op = create_tensordot_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dims_self, dims_other, out);
}

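// Usage sketch (illustrative): dims_self / dims_other name the dimensions to
// contract against each other.
//   at::Tensor a = at::randn({3, 4});
//   at::Tensor b = at::randn({4, 5});
//   // contracts dim 1 of `a` with dim 0 of `b` -> shape [3, 5]
//   at::Tensor c = at::tensordot(a, b, /*dims_self=*/{1}, /*dims_other=*/{0});
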
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tile, name, "aten::tile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tile, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(tile, schema_str, "tile(Tensor self, int[] dims) -> Tensor")

// aten::tile(Tensor self, int[] dims) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tile::schema> create_tile_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tile::name, tile::overload_name)
      .typed<tile::schema>();
}

// aten::tile(Tensor self, int[] dims) -> Tensor
at::Tensor tile::call(const at::Tensor & self, at::IntArrayRef dims) {

    static auto op = create_tile_typed_handle();
    return op.call(self, dims);
}

// aten::tile(Tensor self, int[] dims) -> Tensor
at::Tensor tile::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {

    static auto op = create_tile_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims);
}

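// Usage sketch (illustrative): if dims has more entries than self has
// dimensions, leading singleton dimensions are prepended first.
//   at::Tensor t = at::arange(3);
//   at::Tensor r = at::tile(t, {2});        // [0, 1, 2, 0, 1, 2]
//   at::Tensor g = at::tile(t, {2, 2});     // shape [2, 6]
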
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose, name, "aten::_mkldnn_transpose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose, schema_str, "_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor")

// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_transpose::schema> create__mkldnn_transpose_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mkldnn_transpose::name, _mkldnn_transpose::overload_name)
      .typed<_mkldnn_transpose::schema>();
}

// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
at::Tensor _mkldnn_transpose::call(const at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create__mkldnn_transpose_typed_handle();
    return op.call(self, dim0, dim1);
}

// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
at::Tensor _mkldnn_transpose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create__mkldnn_transpose_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim0, dim1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose_, name, "aten::_mkldnn_transpose_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose_, schema_str, "_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)")

// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_transpose_::schema> create__mkldnn_transpose__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mkldnn_transpose_::name, _mkldnn_transpose_::overload_name)
      .typed<_mkldnn_transpose_::schema>();
}

// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
at::Tensor & _mkldnn_transpose_::call(at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create__mkldnn_transpose__typed_handle();
    return op.call(self, dim0, dim1);
}

// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
at::Tensor & _mkldnn_transpose_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create__mkldnn_transpose__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim0, dim1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fliplr, name, "aten::fliplr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fliplr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fliplr, schema_str, "fliplr(Tensor self) -> Tensor")

// aten::fliplr(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fliplr::schema> create_fliplr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fliplr::name, fliplr::overload_name)
      .typed<fliplr::schema>();
}

// aten::fliplr(Tensor self) -> Tensor
at::Tensor fliplr::call(const at::Tensor & self) {

    static auto op = create_fliplr_typed_handle();
    return op.call(self);
}

// aten::fliplr(Tensor self) -> Tensor
at::Tensor fliplr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_fliplr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_and_nested_example, name, "aten::_nested_from_padded_and_nested_example")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_and_nested_example, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_and_nested_example, schema_str, "_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor")

// aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded_and_nested_example::schema> create__nested_from_padded_and_nested_example_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_from_padded_and_nested_example::name, _nested_from_padded_and_nested_example::overload_name)
      .typed<_nested_from_padded_and_nested_example::schema>();
}

// aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
at::Tensor _nested_from_padded_and_nested_example::call(const at::Tensor & padded, const at::Tensor & nt_example) {

    static auto op = create__nested_from_padded_and_nested_example_typed_handle();
    return op.call(padded, nt_example);
}

// aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
at::Tensor _nested_from_padded_and_nested_example::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example) {

    static auto op = create__nested_from_padded_and_nested_example_typed_handle();
    return op.redispatch(dispatchKeySet, padded, nt_example);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix, name, "aten::fix")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix, schema_str, "fix(Tensor self) -> Tensor")

// aten::fix(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fix::schema> create_fix_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fix::name, fix::overload_name)
      .typed<fix::schema>();
}

// aten::fix(Tensor self) -> Tensor
at::Tensor fix::call(const at::Tensor & self) {

    static auto op = create_fix_typed_handle();
    return op.call(self);
}

// aten::fix(Tensor self) -> Tensor
at::Tensor fix::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_fix_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix_, name, "aten::fix_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix_, schema_str, "fix_(Tensor(a!) self) -> Tensor(a!)")

// aten::fix_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fix_::schema> create_fix__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fix_::name, fix_::overload_name)
      .typed<fix_::schema>();
}

// aten::fix_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & fix_::call(at::Tensor & self) {

    static auto op = create_fix__typed_handle();
    return op.call(self);
}

// aten::fix_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & fix_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_fix__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix_out, name, "aten::fix")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fix_out, schema_str, "fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fix_out::schema> create_fix_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fix_out::name, fix_out::overload_name)
      .typed<fix_out::schema>();
}

// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fix_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_fix_out_typed_handle();
    return op.call(self, out);
}

// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fix_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_fix_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim, name, "aten::unique_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim, schema_str, "unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)")

// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<unique_dim::schema> create_unique_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unique_dim::name, unique_dim::overload_name)
      .typed<unique_dim::schema>();
}

// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim::call(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {

    static auto op = create_unique_dim_typed_handle();
    return op.call(self, dim, sorted, return_inverse, return_counts);
}

// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {

    static auto op = create_unique_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_consecutive, name, "aten::unique_consecutive")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_consecutive, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_consecutive, schema_str, "unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)")

// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<unique_consecutive::schema> create_unique_consecutive_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unique_consecutive::name, unique_consecutive::overload_name)
      .typed<unique_consecutive::schema>();
}

// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive::call(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {

    static auto op = create_unique_consecutive_typed_handle();
    return op.call(self, return_inverse, return_counts, dim);
}

// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {

    static auto op = create_unique_consecutive_typed_handle();
    return op.redispatch(dispatchKeySet, self, return_inverse, return_counts, dim);
}

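// Usage sketch (illustrative): both unique variants return a
// (values, inverse_indices, counts) tuple; the inverse/counts tensors are
// empty unless the corresponding flag is set.
//   at::Tensor t = at::tensor({1, 1, 2, 2, 3});
//   auto [vals, inv, counts] = at::unique_consecutive(
//       t, /*return_inverse=*/true, /*return_counts=*/true);
//   // vals = [1, 2, 3], counts = [2, 2, 1]
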
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vander, name, "aten::vander")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vander, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(vander, schema_str, "vander(Tensor x, int? N=None, bool increasing=False) -> Tensor")

// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<vander::schema> create_vander_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vander::name, vander::overload_name)
      .typed<vander::schema>();
}

// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
at::Tensor vander::call(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {

    static auto op = create_vander_typed_handle();
    return op.call(x, N, increasing);
}

// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
at::Tensor vander::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {

    static auto op = create_vander_typed_handle();
    return op.redispatch(dispatchKeySet, x, N, increasing);
}

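// Usage sketch (illustrative): N defaults to x.size(0) when passed as
// c10::nullopt.
//   at::Tensor x = at::tensor({1., 2., 3.});
//   at::Tensor V = at::vander(x);                                // 3x3 matrix
//   at::Tensor W = at::vander(x, /*N=*/2, /*increasing=*/true);  // columns [1, x]
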
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as, name, "aten::view_as")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(view_as, schema_str, "view_as(Tensor(a) self, Tensor other) -> Tensor(a)")

// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<view_as::schema> create_view_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as::name, view_as::overload_name)
      .typed<view_as::schema>();
}

// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor view_as::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_view_as_typed_handle();
    return op.call(self, other);
}

// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor view_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_view_as_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dirichlet_grad, name, "aten::_dirichlet_grad")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dirichlet_grad, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dirichlet_grad, schema_str, "_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor")

// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_dirichlet_grad::schema> create__dirichlet_grad_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dirichlet_grad::name, _dirichlet_grad::overload_name)
      .typed<_dirichlet_grad::schema>();
}

// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
at::Tensor _dirichlet_grad::call(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {

    static auto op = create__dirichlet_grad_typed_handle();
    return op.call(x, alpha, total);
}

// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
at::Tensor _dirichlet_grad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {

    static auto op = create__dirichlet_grad_typed_handle();
    return op.redispatch(dispatchKeySet, x, alpha, total);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frobenius_norm_dim, name, "aten::frobenius_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frobenius_norm_dim, overload_name, "dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frobenius_norm_dim, schema_str, "frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor")

// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<frobenius_norm_dim::schema> create_frobenius_norm_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(frobenius_norm_dim::name, frobenius_norm_dim::overload_name)
      .typed<frobenius_norm_dim::schema>();
}

// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor frobenius_norm_dim::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {

    static auto op = create_frobenius_norm_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor frobenius_norm_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {

    static auto op = create_frobenius_norm_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frobenius_norm_out, name, "aten::frobenius_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frobenius_norm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(frobenius_norm_out, schema_str, "frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<frobenius_norm_out::schema> create_frobenius_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(frobenius_norm_out::name, frobenius_norm_out::overload_name)
      .typed<frobenius_norm_out::schema>();
}

// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & frobenius_norm_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {

    static auto op = create_frobenius_norm_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & frobenius_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {

    static auto op = create_frobenius_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clone, name, "aten::clone")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clone, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clone, schema_str, "clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor")

// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<clone::schema> create_clone_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clone::name, clone::overload_name)
      .typed<clone::schema>();
}

// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor clone::call(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_clone_typed_handle();
    return op.call(self, memory_format);
}

// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor clone::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_clone_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format);
}

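// Usage sketch (illustrative): the optional memory_format controls the layout
// of the copy; by default the source layout is preserved.
//   at::Tensor img = at::randn({1, 3, 8, 8});
//   at::Tensor c1 = img.clone();
//   at::Tensor c2 = at::clone(img, at::MemoryFormat::ChannelsLast);
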
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(positive, name, "aten::positive")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(positive, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(positive, schema_str, "positive(Tensor(a) self) -> Tensor(a)")

// aten::positive(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<positive::schema> create_positive_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(positive::name, positive::overload_name)
      .typed<positive::schema>();
}

// aten::positive(Tensor(a) self) -> Tensor(a)
at::Tensor positive::call(const at::Tensor & self) {

    static auto op = create_positive_typed_handle();
    return op.call(self);
}

// aten::positive(Tensor(a) self) -> Tensor(a)
at::Tensor positive::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_positive_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_, name, "aten::resize_as_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_, schema_str, "resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)")

// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_::schema> create_resize_as__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as_::name, resize_as_::overload_name)
      .typed<resize_as_::schema>();
}

// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
const at::Tensor & resize_as_::call(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_resize_as__typed_handle();
    return op.call(self, the_template, memory_format);
}

// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
const at::Tensor & resize_as_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_resize_as__typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template, memory_format);
}

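// Usage sketch (illustrative): resize_as_ mutates self in place but, unlike
// most in-place ops, the wrapper returns `const at::Tensor &`.
//   at::Tensor dst = at::empty({0});
//   at::Tensor tmpl = at::zeros({2, 3});
//   at::resize_as_(dst, tmpl);              // dst now has shape [2, 3]
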
5949STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse_, name, "aten::resize_as_sparse_")
5950STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse_, overload_name, "")
5951STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse_, schema_str, "resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)")
5952
5953// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
5954static C10_NOINLINE c10::TypedOperatorHandle<resize_as_sparse_::schema> create_resize_as_sparse__typed_handle() {
5955 return c10::Dispatcher::singleton()
5956 .findSchemaOrThrow(resize_as_sparse_::name, resize_as_sparse_::overload_name)
5957 .typed<resize_as_sparse_::schema>();
5958}
5959
5960// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
5961const at::Tensor & resize_as_sparse_::call(const at::Tensor & self, const at::Tensor & the_template) {
5962
5963 static auto op = create_resize_as_sparse__typed_handle();
5964 return op.call(self, the_template);
5965}
5966
5967// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
5968const at::Tensor & resize_as_sparse_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) {
5969
5970 static auto op = create_resize_as_sparse__typed_handle();
5971 return op.redispatch(dispatchKeySet, self, the_template);
5972}
5973
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_sampled_addmm_out, name, "aten::sparse_sampled_addmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_sampled_addmm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_sampled_addmm_out, schema_str, "sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")

// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sparse_sampled_addmm_out::schema> create_sparse_sampled_addmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_sampled_addmm_out::name, sparse_sampled_addmm_out::overload_name)
      .typed<sparse_sampled_addmm_out::schema>();
}

// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sparse_sampled_addmm_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_sparse_sampled_addmm_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, out);
}

// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sparse_sampled_addmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_sparse_sampled_addmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_sampled_addmm, name, "aten::sparse_sampled_addmm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_sampled_addmm, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_sampled_addmm, schema_str, "sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")

// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_sampled_addmm::schema> create_sparse_sampled_addmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_sampled_addmm::name, sparse_sampled_addmm::overload_name)
      .typed<sparse_sampled_addmm::schema>();
}

// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor sparse_sampled_addmm::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_sparse_sampled_addmm_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor sparse_sampled_addmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_sparse_sampled_addmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

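// NOTE: sparse_sampled_addmm computes beta * self + alpha * (mat1 @ mat2),
// evaluated only at the sparsity pattern of the CSR tensor `self`. A minimal
// sketch, assuming a build/device where a kernel for this op is available:
//
//   at::Tensor pattern = at::randn({4, 4}).to_sparse_csr();  // sampling mask
//   at::Tensor mat1 = at::randn({4, 8});
//   at::Tensor mat2 = at::randn({8, 4});
//   at::Tensor out = at::sparse_sampled_addmm(pattern, mat1, mat2);
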
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csr_tensor_crow_col_value_size, name, "aten::sparse_csr_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csr_tensor_crow_col_value_size, overload_name, "crow_col_value_size")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csr_tensor_crow_col_value_size, schema_str, "sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_csr_tensor_crow_col_value_size::schema> create_sparse_csr_tensor_crow_col_value_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_csr_tensor_crow_col_value_size::name, sparse_csr_tensor_crow_col_value_size::overload_name)
      .typed<sparse_csr_tensor_crow_col_value_size::schema>();
}

// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_csr_tensor_crow_col_value_size::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    
    static auto op = create_sparse_csr_tensor_crow_col_value_size_typed_handle();
    return op.call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_csr_tensor_crow_col_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    
    static auto op = create_sparse_csr_tensor_crow_col_value_size_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csr_tensor_crow_col_value, name, "aten::sparse_csr_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csr_tensor_crow_col_value, overload_name, "crow_col_value")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(sparse_csr_tensor_crow_col_value, schema_str, "sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")

// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_csr_tensor_crow_col_value::schema> create_sparse_csr_tensor_crow_col_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_csr_tensor_crow_col_value::name, sparse_csr_tensor_crow_col_value::overload_name)
      .typed<sparse_csr_tensor_crow_col_value::schema>();
}

// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_csr_tensor_crow_col_value::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    
    static auto op = create_sparse_csr_tensor_crow_col_value_typed_handle();
    return op.call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_csr_tensor_crow_col_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    
    static auto op = create_sparse_csr_tensor_crow_col_value_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}

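// NOTE: A minimal sketch of building a CSR tensor through the overload above.
// The 2x2 matrix and the at::tensor list helpers are illustrative assumptions,
// not part of this generated file:
//
//   at::Tensor crow = at::tensor({0, 2, 4}, at::kLong);      // row pointers
//   at::Tensor col  = at::tensor({0, 1, 0, 1}, at::kLong);   // column indices
//   at::Tensor vals = at::tensor({1.0, 2.0, 3.0, 4.0});
//   at::Tensor csr  = at::sparse_csr_tensor(crow, col, vals, {2, 2},
//                                           at::TensorOptions().dtype(at::kDouble));
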
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_bsc_tensor_unsafe, name, "aten::_sparse_bsc_tensor_unsafe")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_bsc_tensor_unsafe, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_sparse_bsc_tensor_unsafe, schema_str, "_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_bsc_tensor_unsafe::schema> create__sparse_bsc_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_bsc_tensor_unsafe::name, _sparse_bsc_tensor_unsafe::overload_name)
      .typed<_sparse_bsc_tensor_unsafe::schema>();
}

// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_bsc_tensor_unsafe::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    
    static auto op = create__sparse_bsc_tensor_unsafe_typed_handle();
    return op.call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_bsc_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    
    static auto op = create__sparse_bsc_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dense_dim, name, "aten::dense_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dense_dim, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dense_dim, schema_str, "dense_dim(Tensor self) -> int")

// aten::dense_dim(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<dense_dim::schema> create_dense_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dense_dim::name, dense_dim::overload_name)
      .typed<dense_dim::schema>();
}

// aten::dense_dim(Tensor self) -> int
int64_t dense_dim::call(const at::Tensor & self) {
    
    static auto op = create_dense_dim_typed_handle();
    return op.call(self);
}

// aten::dense_dim(Tensor self) -> int
int64_t dense_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_dense_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dimV, name, "aten::_dimV")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dimV, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dimV, schema_str, "_dimV(Tensor self) -> int")

// aten::_dimV(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_dimV::schema> create__dimV_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dimV::name, _dimV::overload_name)
      .typed<_dimV::schema>();
}

// aten::_dimV(Tensor self) -> int
int64_t _dimV::call(const at::Tensor & self) {
    
    static auto op = create__dimV_typed_handle();
    return op.call(self);
}

// aten::_dimV(Tensor self) -> int
int64_t _dimV::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__dimV_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(coalesce, name, "aten::coalesce")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(coalesce, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(coalesce, schema_str, "coalesce(Tensor(a) self) -> Tensor(a)")

// aten::coalesce(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<coalesce::schema> create_coalesce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(coalesce::name, coalesce::overload_name)
      .typed<coalesce::schema>();
}

// aten::coalesce(Tensor(a) self) -> Tensor(a)
at::Tensor coalesce::call(const at::Tensor & self) {
    
    static auto op = create_coalesce_typed_handle();
    return op.call(self);
}

// aten::coalesce(Tensor(a) self) -> Tensor(a)
at::Tensor coalesce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_coalesce_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

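// NOTE: coalesce sums duplicate COO entries and sorts the indices. A minimal
// sketch, assuming a sparse-COO-enabled build; the tensors here are
// illustrative, not part of the generated wrappers:
//
//   at::Tensor indices = at::tensor({0, 0, 1, 1}, at::kLong).reshape({2, 2});
//   at::Tensor values  = at::tensor({3.0, 4.0});
//   at::Tensor sp = at::sparse_coo_tensor(indices, values, {2, 2});
//   at::Tensor c  = sp.coalesce();   // the two (0, 1) entries merge into 7.0
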
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_indices, name, "aten::_indices")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_indices, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_indices, schema_str, "_indices(Tensor(a) self) -> Tensor(a)")

// aten::_indices(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_indices::schema> create__indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_indices::name, _indices::overload_name)
      .typed<_indices::schema>();
}

// aten::_indices(Tensor(a) self) -> Tensor(a)
at::Tensor _indices::call(const at::Tensor & self) {
    
    static auto op = create__indices_typed_handle();
    return op.call(self);
}

// aten::_indices(Tensor(a) self) -> Tensor(a)
at::Tensor _indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csc, name, "aten::to_sparse_csc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csc, schema_str, "to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor")

// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_csc::schema> create_to_sparse_csc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_csc::name, to_sparse_csc::overload_name)
      .typed<to_sparse_csc::schema>();
}

// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
at::Tensor to_sparse_csc::call(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_csc_typed_handle();
    return op.call(self, dense_dim);
}

// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
at::Tensor to_sparse_csc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_csc_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim);
}

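// NOTE: A minimal sketch of to_sparse_csc; leaving dense_dim unset (None)
// keeps all trailing dimensions in the sparse part of the layout:
//
//   at::Tensor dense = at::randn({3, 3});
//   at::Tensor csc = dense.to_sparse_csc();
//   // csc.ccol_indices(), csc.row_indices(), csc.values() expose the layout
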
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv2d_weight, name, "aten::mkldnn_reorder_conv2d_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv2d_weight, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv2d_weight, schema_str, "mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor")

// aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_reorder_conv2d_weight::schema> create_mkldnn_reorder_conv2d_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_reorder_conv2d_weight::name, mkldnn_reorder_conv2d_weight::overload_name)
      .typed<mkldnn_reorder_conv2d_weight::schema>();
}

// aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor
at::Tensor mkldnn_reorder_conv2d_weight::call(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size) {
    
    static auto op = create_mkldnn_reorder_conv2d_weight_typed_handle();
    return op.call(self, padding, stride, dilation, groups, input_size);
}

// aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor
at::Tensor mkldnn_reorder_conv2d_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size) {
    
    static auto op = create_mkldnn_reorder_conv2d_weight_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantize_per_channel, name, "aten::quantize_per_channel")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantize_per_channel, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantize_per_channel, schema_str, "quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor")

// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_channel::schema> create_quantize_per_channel_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_channel::name, quantize_per_channel::overload_name)
      .typed<quantize_per_channel::schema>();
}

// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
at::Tensor quantize_per_channel::call(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
    
    static auto op = create_quantize_per_channel_typed_handle();
    return op.call(self, scales, zero_points, axis, dtype);
}

// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
at::Tensor quantize_per_channel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
    
    static auto op = create_quantize_per_channel_typed_handle();
    return op.redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype);
}

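// NOTE: quantize_per_channel takes one scale/zero_point pair per slice along
// `axis`. A minimal sketch, assuming a quantized-backend build; the shapes
// and values are illustrative:
//
//   at::Tensor x = at::randn({2, 3});
//   at::Tensor scales = at::tensor({0.1, 0.2, 0.3});          // one per channel
//   at::Tensor zero_points = at::zeros({3}, at::kLong);
//   at::Tensor q = at::quantize_per_channel(x, scales, zero_points,
//                                           /*axis=*/1, at::kQInt8);
//   at::Tensor back = q.dequantize();   // dequantize.self, defined below
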
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_self, name, "aten::dequantize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_self, overload_name, "self")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_self, schema_str, "dequantize.self(Tensor self) -> Tensor")

// aten::dequantize.self(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<dequantize_self::schema> create_dequantize_self_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dequantize_self::name, dequantize_self::overload_name)
      .typed<dequantize_self::schema>();
}

// aten::dequantize.self(Tensor self) -> Tensor
at::Tensor dequantize_self::call(const at::Tensor & self) {
    
    static auto op = create_dequantize_self_typed_handle();
    return op.call(self);
}

// aten::dequantize.self(Tensor self) -> Tensor
at::Tensor dequantize_self::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_dequantize_self_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_tensors, name, "aten::dequantize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_tensors, overload_name, "tensors")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_tensors, schema_str, "dequantize.tensors(Tensor[] tensors) -> Tensor[]")

// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<dequantize_tensors::schema> create_dequantize_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dequantize_tensors::name, dequantize_tensors::overload_name)
      .typed<dequantize_tensors::schema>();
}

// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> dequantize_tensors::call(at::TensorList tensors) {
    
    static auto op = create_dequantize_tensors_typed_handle();
    return op.call(tensors);
}

// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> dequantize_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    
    static auto op = create_dequantize_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_zero_points, name, "aten::q_per_channel_zero_points")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_zero_points, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_zero_points, schema_str, "q_per_channel_zero_points(Tensor self) -> Tensor")

// aten::q_per_channel_zero_points(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_zero_points::schema> create_q_per_channel_zero_points_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_zero_points::name, q_per_channel_zero_points::overload_name)
      .typed<q_per_channel_zero_points::schema>();
}

// aten::q_per_channel_zero_points(Tensor self) -> Tensor
at::Tensor q_per_channel_zero_points::call(const at::Tensor & self) {
    
    static auto op = create_q_per_channel_zero_points_typed_handle();
    return op.call(self);
}

// aten::q_per_channel_zero_points(Tensor self) -> Tensor
at::Tensor q_per_channel_zero_points::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_q_per_channel_zero_points_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine, name, "aten::fake_quantize_per_tensor_affine")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine, schema_str, "fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor")

// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine::schema> create_fake_quantize_per_tensor_affine_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_tensor_affine::name, fake_quantize_per_tensor_affine::overload_name)
      .typed<fake_quantize_per_tensor_affine::schema>();
}

// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_tensor_affine::call(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    
    static auto op = create_fake_quantize_per_tensor_affine_typed_handle();
    return op.call(self, scale, zero_point, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_tensor_affine::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    
    static auto op = create_fake_quantize_per_tensor_affine_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max);
}

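// NOTE: fake_quantize_per_tensor_affine simulates quantization in float: it
// rounds self / scale + zero_point, clamps to [quant_min, quant_max], and
// maps the result back through (q - zero_point) * scale. A minimal sketch:
//
//   at::Tensor x = at::randn({8});
//   at::Tensor y = at::fake_quantize_per_tensor_affine(
//       x, /*scale=*/0.1, /*zero_point=*/0,
//       /*quant_min=*/-128, /*quant_max=*/127);
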
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine_tensor_qparams, name, "aten::fake_quantize_per_tensor_affine")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine_tensor_qparams, overload_name, "tensor_qparams")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fake_quantize_per_tensor_affine_tensor_qparams, schema_str, "fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor")

// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine_tensor_qparams::schema> create_fake_quantize_per_tensor_affine_tensor_qparams_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_tensor_affine_tensor_qparams::name, fake_quantize_per_tensor_affine_tensor_qparams::overload_name)
      .typed<fake_quantize_per_tensor_affine_tensor_qparams::schema>();
}

// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_tensor_affine_tensor_qparams::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
    
    static auto op = create_fake_quantize_per_tensor_affine_tensor_qparams_typed_handle();
    return op.call(self, scale, zero_point, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_tensor_affine_tensor_qparams::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
    
    static auto op = create_fake_quantize_per_tensor_affine_tensor_qparams_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine, name, "aten::_fake_quantize_learnable_per_channel_affine")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine, schema_str, "_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor")

// aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_channel_affine::schema> create__fake_quantize_learnable_per_channel_affine_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fake_quantize_learnable_per_channel_affine::name, _fake_quantize_learnable_per_channel_affine::overload_name)
      .typed<_fake_quantize_learnable_per_channel_affine::schema>();
}

// aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
at::Tensor _fake_quantize_learnable_per_channel_affine::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    
    static auto op = create__fake_quantize_learnable_per_channel_affine_typed_handle();
    return op.call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
at::Tensor _fake_quantize_learnable_per_channel_affine::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    
    static auto op = create__fake_quantize_learnable_per_channel_affine_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_autocast_to_full_precision, name, "aten::_autocast_to_full_precision")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_autocast_to_full_precision, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_autocast_to_full_precision, schema_str, "_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)")

// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_autocast_to_full_precision::schema> create__autocast_to_full_precision_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_autocast_to_full_precision::name, _autocast_to_full_precision::overload_name)
      .typed<_autocast_to_full_precision::schema>();
}

// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
at::Tensor _autocast_to_full_precision::call(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
    
    static auto op = create__autocast_to_full_precision_typed_handle();
    return op.call(self, cuda_enabled, cpu_enabled);
}

// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
at::Tensor _autocast_to_full_precision::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
    
    static auto op = create__autocast_to_full_precision_typed_handle();
    return op.redispatch(dispatchKeySet, self, cuda_enabled, cpu_enabled);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dtype_layout, name, "aten::to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dtype_layout, overload_name, "dtype_layout")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dtype_layout, schema_str, "to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)")

// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<to_dtype_layout::schema> create_to_dtype_layout_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_dtype_layout::name, to_dtype_layout::overload_name)
      .typed<to_dtype_layout::schema>();
}

// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
at::Tensor to_dtype_layout::call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_to_dtype_layout_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}

// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
at::Tensor to_dtype_layout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_to_dtype_layout_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_device, name, "aten::to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_device, overload_name, "device")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_device, schema_str, "to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)")

// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<to_device::schema> create_to_device_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_device::name, to_device::overload_name)
      .typed<to_device::schema>();
}

// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
at::Tensor to_device::call(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_to_device_typed_handle();
    return op.call(self, device, dtype, non_blocking, copy, memory_format);
}

// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
at::Tensor to_device::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_to_device_typed_handle();
    return op.redispatch(dispatchKeySet, self, device, dtype, non_blocking, copy, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dtype, name, "aten::to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dtype, overload_name, "dtype")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_dtype, schema_str, "to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)")

// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<to_dtype::schema> create_to_dtype_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_dtype::name, to_dtype::overload_name)
      .typed<to_dtype::schema>();
}

// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
at::Tensor to_dtype::call(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_to_dtype_typed_handle();
    return op.call(self, dtype, non_blocking, copy, memory_format);
}

// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
at::Tensor to_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_to_dtype_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, non_blocking, copy, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_other, name, "aten::to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_other, overload_name, "other")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_other, schema_str, "to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)")

// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<to_other::schema> create_to_other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_other::name, to_other::overload_name)
      .typed<to_other::schema>();
}

// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
at::Tensor to_other::call(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_to_other_typed_handle();
    return op.call(self, other, non_blocking, copy, memory_format);
}

// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
at::Tensor to_other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_to_other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, non_blocking, copy, memory_format);
}

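// NOTE: The four aten::to overloads above back the different Tensor::to
// signatures in the C++ frontend. A minimal sketch of how each is reached:
//
//   at::Tensor x = at::randn({2, 2});
//   at::Tensor a = x.to(at::kDouble);                        // to.dtype
//   at::Tensor b = x.to(at::Device(at::kCPU), at::kFloat);   // to.device
//   at::Tensor c = x.to(x.options());                        // to.dtype_layout
//   at::Tensor d = x.to(b);                                  // to.other
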
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(combinations, name, "aten::combinations")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(combinations, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(combinations, schema_str, "combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor")

// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<combinations::schema> create_combinations_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(combinations::name, combinations::overload_name)
      .typed<combinations::schema>();
}

// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
at::Tensor combinations::call(const at::Tensor & self, int64_t r, bool with_replacement) {
    
    static auto op = create_combinations_typed_handle();
    return op.call(self, r, with_replacement);
}

// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
at::Tensor combinations::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t r, bool with_replacement) {
    
    static auto op = create_combinations_typed_handle();
    return op.redispatch(dispatchKeySet, self, r, with_replacement);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(item, name, "aten::item")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(item, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(item, schema_str, "item(Tensor self) -> Scalar")

// aten::item(Tensor self) -> Scalar
static C10_NOINLINE c10::TypedOperatorHandle<item::schema> create_item_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(item::name, item::overload_name)
      .typed<item::schema>();
}

// aten::item(Tensor self) -> Scalar
at::Scalar item::call(const at::Tensor & self) {
    
    static auto op = create_item_typed_handle();
    return op.call(self);
}

// aten::item(Tensor self) -> Scalar
at::Scalar item::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_item_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

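// NOTE: item materializes a single-element tensor as an at::Scalar on the
// host, synchronizing with the device if necessary. A minimal sketch:
//
//   at::Tensor t = at::ones({}) * 3.5;
//   at::Scalar s = t.item();
//   double v = s.toDouble();   // or the typed shortcut t.item<double>()
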
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_lstm_mps, name, "aten::_lstm_mps")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_lstm_mps, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_lstm_mps, schema_str, "_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")

// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_lstm_mps::schema> create__lstm_mps_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_lstm_mps::name, _lstm_mps::overload_name)
      .typed<_lstm_mps::schema>();
}

// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps::call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    
    static auto op = create__lstm_mps_typed_handle();
    return op.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    
    static auto op = create__lstm_mps_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell, name, "aten::_thnn_fused_lstm_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell, schema_str, "_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)")

// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell::schema> create__thnn_fused_lstm_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_thnn_fused_lstm_cell::name, _thnn_fused_lstm_cell::overload_name)
      .typed<_thnn_fused_lstm_cell::schema>();
}

// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell::call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
    
    static auto op = create__thnn_fused_lstm_cell_typed_handle();
    return op.call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
}

// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
    
    static auto op = create__thnn_fused_lstm_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_input, name, "aten::lstm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_input, overload_name, "input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_input, schema_str, "lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)")

// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<lstm_input::schema> create_lstm_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lstm_input::name, lstm_input::overload_name)
      .typed<lstm_input::schema>();
}

// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input::call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    
    static auto op = create_lstm_input_typed_handle();
    return op.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    
    static auto op = create_lstm_input_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

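// NOTE: lstm.input takes hx as {h0, c0} and params flattened per layer as
// {w_ih, w_hh[, b_ih, b_hh]}. A minimal single-layer sketch; all names and
// sizes below are illustrative assumptions:
//
//   int64_t in_size = 4, hidden = 3;
//   at::Tensor x    = at::randn({5, 1, in_size});   // (seq, batch, feature)
//   at::Tensor h0   = at::zeros({1, 1, hidden});
//   at::Tensor c0   = at::zeros({1, 1, hidden});
//   at::Tensor w_ih = at::randn({4 * hidden, in_size});
//   at::Tensor w_hh = at::randn({4 * hidden, hidden});
//   at::Tensor b_ih = at::zeros({4 * hidden});
//   at::Tensor b_hh = at::zeros({4 * hidden});
//   auto result = at::lstm(x, {h0, c0}, {w_ih, w_hh, b_ih, b_hh},
//       /*has_biases=*/true, /*num_layers=*/1, /*dropout=*/0.0,
//       /*train=*/false, /*bidirectional=*/false, /*batch_first=*/false);
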
6674STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_data, name, "aten::lstm")
6675STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_data, overload_name, "data")
6676STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lstm_data, schema_str, "lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)")
6677
6678// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
6679static C10_NOINLINE c10::TypedOperatorHandle<lstm_data::schema> create_lstm_data_typed_handle() {
6680 return c10::Dispatcher::singleton()
6681 .findSchemaOrThrow(lstm_data::name, lstm_data::overload_name)
6682 .typed<lstm_data::schema>();
6683}
6684
6685// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
6686::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data::call(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
6687
6688 static auto op = create_lstm_data_typed_handle();
6689 return op.call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
6690}
6691
6692// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
6693::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
6694
6695 static auto op = create_lstm_data_typed_handle();
6696 return op.redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
6697}
6698
6699STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gru_input, name, "aten::gru")
6700STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gru_input, overload_name, "input")
6701STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gru_input, schema_str, "gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)")
6702
6703// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
6704static C10_NOINLINE c10::TypedOperatorHandle<gru_input::schema> create_gru_input_typed_handle() {
6705 return c10::Dispatcher::singleton()
6706 .findSchemaOrThrow(gru_input::name, gru_input::overload_name)
6707 .typed<gru_input::schema>();
6708}
6709
6710// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
6711::std::tuple<at::Tensor,at::Tensor> gru_input::call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
6712
6713 static auto op = create_gru_input_typed_handle();
6714 return op.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
6715}
6716
6717// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
6718::std::tuple<at::Tensor,at::Tensor> gru_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
6719
6720 static auto op = create_gru_input_typed_handle();
6721 return op.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
6722}
6723
6724STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gru_data, name, "aten::gru")
6725STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gru_data, overload_name, "data")
6726STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(gru_data, schema_str, "gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)")
6727
6728// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
6729static C10_NOINLINE c10::TypedOperatorHandle<gru_data::schema> create_gru_data_typed_handle() {
6730 return c10::Dispatcher::singleton()
6731 .findSchemaOrThrow(gru_data::name, gru_data::overload_name)
6732 .typed<gru_data::schema>();
6733}
6734
6735// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
6736::std::tuple<at::Tensor,at::Tensor> gru_data::call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
6737
6738 static auto op = create_gru_data_typed_handle();
6739 return op.call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
6740}
6741
6742// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
6743::std::tuple<at::Tensor,at::Tensor> gru_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
6744
6745 static auto op = create_gru_data_typed_handle();
    return op.redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_input, name, "aten::rnn_tanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_input, overload_name, "input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_input, schema_str, "rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)")

// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<rnn_tanh_input::schema> create_rnn_tanh_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rnn_tanh_input::name, rnn_tanh_input::overload_name)
      .typed<rnn_tanh_input::schema>();
}

// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input::call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {

    static auto op = create_rnn_tanh_input_typed_handle();
    return op.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {

    static auto op = create_rnn_tanh_input_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_data, name, "aten::rnn_tanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_data, overload_name, "data")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_tanh_data, schema_str, "rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)")

// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<rnn_tanh_data::schema> create_rnn_tanh_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rnn_tanh_data::name, rnn_tanh_data::overload_name)
      .typed<rnn_tanh_data::schema>();
}

// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data::call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {

    static auto op = create_rnn_tanh_data_typed_handle();
    return op.call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {

    static auto op = create_rnn_tanh_data_typed_handle();
    return op.redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
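
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// The two rnn_tanh overloads above differ only in sequence layout: ".input"
// takes a padded batch (input is [seq_len, batch, input_size] unless
// batch_first), while ".data" takes a packed sequence (data + batch_sizes,
// as produced by aten::_pack_padded_sequence). A caller in another TU might
// write (assuming <ATen/ATen.h> for factories; `params` is the flat weight
// list w_ih, w_hh, b_ih, b_hh per layer, elided here):
//
//   at::Tensor input = at::randn({5, 3, 10});  // [seq_len, batch, input_size]
//   at::Tensor hx    = at::randn({1, 3, 20});  // [layers*dirs, batch, hidden]
//   auto [output, hy] = at::_ops::rnn_tanh_input::call(
//       input, hx, params, /*has_biases=*/true, /*num_layers=*/1,
//       /*dropout=*/0.0, /*train=*/false, /*bidirectional=*/false,
//       /*batch_first=*/false);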

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_relu_cell, name, "aten::rnn_relu_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_relu_cell, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rnn_relu_cell, schema_str, "rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor")

// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rnn_relu_cell::schema> create_rnn_relu_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rnn_relu_cell::name, rnn_relu_cell::overload_name)
      .typed<rnn_relu_cell::schema>();
}

// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor rnn_relu_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {

    static auto op = create_rnn_relu_cell_typed_handle();
    return op.call(input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor rnn_relu_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {

    static auto op = create_rnn_relu_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh);
}
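
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// rnn_relu_cell is the single-timestep kernel behind the relu RNN, roughly
// h' = relu(input.matmul(w_ih.t()) + b_ih + hx.matmul(w_hh.t()) + b_hh).
// Omitting both optional biases (assumes <ATen/ATen.h> in the calling TU):
//
//   at::Tensor x    = at::randn({3, 10});  // [batch, input_size]
//   at::Tensor h    = at::randn({3, 20});  // [batch, hidden_size]
//   at::Tensor w_ih = at::randn({20, 10});
//   at::Tensor w_hh = at::randn({20, 20});
//   at::Tensor h2   = at::_ops::rnn_relu_cell::call(
//       x, h, w_ih, w_hh, c10::nullopt, c10::nullopt);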

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_packed_sequence, name, "aten::_pad_packed_sequence")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_packed_sequence, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_packed_sequence, schema_str, "_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)")

// aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_pad_packed_sequence::schema> create__pad_packed_sequence_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pad_packed_sequence::name, _pad_packed_sequence::overload_name)
      .typed<_pad_packed_sequence::schema>();
}

// aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence::call(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {

    static auto op = create__pad_packed_sequence_typed_handle();
    return op.call(data, batch_sizes, batch_first, padding_value, total_length);
}

// aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {

    static auto op = create__pad_packed_sequence_typed_handle();
    return op.redispatch(dispatchKeySet, data, batch_sizes, batch_first, padding_value, total_length);
}
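
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// _pad_packed_sequence inverts packing: given the packed (data, batch_sizes)
// pair, it re-materializes a padded batch and returns (padded, lengths).
// padding_value fills the tails of short sequences; total_length pins the
// padded time dimension (the Python wrapper passes the max sequence length,
// i.e. batch_sizes.size(0), unless the user asks for more):
//
//   auto [padded, lengths] = at::_ops::_pad_packed_sequence::call(
//       data, batch_sizes, /*batch_first=*/false, /*padding_value=*/0.0,
//       /*total_length=*/batch_sizes.size(0));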

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh_copy, name, "aten::lift_fresh_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh_copy, schema_str, "lift_fresh_copy(Tensor self) -> Tensor")

// aten::lift_fresh_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lift_fresh_copy::schema> create_lift_fresh_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lift_fresh_copy::name, lift_fresh_copy::overload_name)
      .typed<lift_fresh_copy::schema>();
}

// aten::lift_fresh_copy(Tensor self) -> Tensor
at::Tensor lift_fresh_copy::call(const at::Tensor & self) {

    static auto op = create_lift_fresh_copy_typed_handle();
    return op.call(self);
}

// aten::lift_fresh_copy(Tensor self) -> Tensor
at::Tensor lift_fresh_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_lift_fresh_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
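
// [Editor note, hedged -- not part of the torchgen output.]
// lift_fresh_copy behaves roughly like self.clone() followed by "lifting"
// the result into the current dispatch context: functionalization and
// tracing treat the output as freshly created, so later in-place mutation
// of the original tensor cannot leak into a captured graph.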

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce_out, name, "aten::index_reduce")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce_out, schema_str, "index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)")

// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_reduce_out::schema> create_index_reduce_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_reduce_out::name, index_reduce_out::overload_name)
      .typed<index_reduce_out::schema>();
}

// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_reduce_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {

    static auto op = create_index_reduce_out_typed_handle();
    return op.call(self, dim, index, source, reduce, include_self, out);
}

// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {

    static auto op = create_index_reduce_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce_, name, "aten::index_reduce_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce_, schema_str, "index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)")

// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_reduce_::schema> create_index_reduce__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_reduce_::name, index_reduce_::overload_name)
      .typed<index_reduce_::schema>();
}

// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
at::Tensor & index_reduce_::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {

    static auto op = create_index_reduce__typed_handle();
    return op.call(self, dim, index, source, reduce, include_self);
}

// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
at::Tensor & index_reduce_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {

    static auto op = create_index_reduce__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce, name, "aten::index_reduce")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_reduce, schema_str, "index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor")

// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_reduce::schema> create_index_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_reduce::name, index_reduce::overload_name)
      .typed<index_reduce::schema>();
}

// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
at::Tensor index_reduce::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {

    static auto op = create_index_reduce_typed_handle();
    return op.call(self, dim, index, source, reduce, include_self);
}

// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
at::Tensor index_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {

    static auto op = create_index_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self);
}
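
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// index_reduce applies a named reduction ("prod", "mean", "amax", "amin")
// from rows of `source` into the rows of `self` selected by `index` along
// `dim`; with include_self=false, self's original values do not participate.
// Sketch (assumes <ATen/ATen.h> for factories):
//
//   at::Tensor self  = at::ones({3, 4});
//   at::Tensor src   = at::full({2, 4}, 2.0);
//   at::Tensor index = at::tensor({0, 2});
//   // rows 0 and 2 become 1*2 = 2 under "prod"; row 1 is untouched
//   at::Tensor out = at::_ops::index_reduce::call(
//       self, 0, index, src, "prod", /*include_self=*/true);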

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__int_Scalar, name, "aten::index_fill_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__int_Scalar, overload_name, "int_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__int_Scalar, schema_str, "index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)")

// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill__int_Scalar::schema> create_index_fill__int_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill__int_Scalar::name, index_fill__int_Scalar::overload_name)
      .typed<index_fill__int_Scalar::schema>();
}

// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & index_fill__int_Scalar::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {

    static auto op = create_index_fill__int_Scalar_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & index_fill__int_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {

    static auto op = create_index_fill__int_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Scalar, name, "aten::index_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Scalar, overload_name, "int_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Scalar, schema_str, "index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor")

// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_int_Scalar::schema> create_index_fill_int_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill_int_Scalar::name, index_fill_int_Scalar::overload_name)
      .typed<index_fill_int_Scalar::schema>();
}

// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
at::Tensor index_fill_int_Scalar::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {

    static auto op = create_index_fill_int_Scalar_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
at::Tensor index_fill_int_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {

    static auto op = create_index_fill_int_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__int_Tensor, name, "aten::index_fill_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__int_Tensor, overload_name, "int_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__int_Tensor, schema_str, "index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)")

// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill__int_Tensor::schema> create_index_fill__int_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill__int_Tensor::name, index_fill__int_Tensor::overload_name)
      .typed<index_fill__int_Tensor::schema>();
}

// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
at::Tensor & index_fill__int_Tensor::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {

    static auto op = create_index_fill__int_Tensor_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
at::Tensor & index_fill__int_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {

    static auto op = create_index_fill__int_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Tensor, name, "aten::index_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Tensor, overload_name, "int_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Tensor, schema_str, "index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor")

// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_int_Tensor::schema> create_index_fill_int_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill_int_Tensor::name, index_fill_int_Tensor::overload_name)
      .typed<index_fill_int_Tensor::schema>();
}

// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
at::Tensor index_fill_int_Tensor::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {

    static auto op = create_index_fill_int_Tensor_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
at::Tensor index_fill_int_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {

    static auto op = create_index_fill_int_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__Dimname_Scalar, name, "aten::index_fill_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__Dimname_Scalar, overload_name, "Dimname_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__Dimname_Scalar, schema_str, "index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)")

// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill__Dimname_Scalar::schema> create_index_fill__Dimname_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill__Dimname_Scalar::name, index_fill__Dimname_Scalar::overload_name)
      .typed<index_fill__Dimname_Scalar::schema>();
}

// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & index_fill__Dimname_Scalar::call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {

    static auto op = create_index_fill__Dimname_Scalar_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & index_fill__Dimname_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {

    static auto op = create_index_fill__Dimname_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__Dimname_Tensor, name, "aten::index_fill_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__Dimname_Tensor, overload_name, "Dimname_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill__Dimname_Tensor, schema_str, "index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)")

// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill__Dimname_Tensor::schema> create_index_fill__Dimname_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill__Dimname_Tensor::name, index_fill__Dimname_Tensor::overload_name)
      .typed<index_fill__Dimname_Tensor::schema>();
}

// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
at::Tensor & index_fill__Dimname_Tensor::call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {

    static auto op = create_index_fill__Dimname_Tensor_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
at::Tensor & index_fill__Dimname_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {

    static auto op = create_index_fill__Dimname_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_Dimname_Scalar, name, "aten::index_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_Dimname_Scalar, overload_name, "Dimname_Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_Dimname_Scalar, schema_str, "index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor")

// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_Dimname_Scalar::schema> create_index_fill_Dimname_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill_Dimname_Scalar::name, index_fill_Dimname_Scalar::overload_name)
      .typed<index_fill_Dimname_Scalar::schema>();
}

// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
at::Tensor index_fill_Dimname_Scalar::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {

    static auto op = create_index_fill_Dimname_Scalar_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
at::Tensor index_fill_Dimname_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {

    static auto op = create_index_fill_Dimname_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_Dimname_Tensor, name, "aten::index_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_Dimname_Tensor, overload_name, "Dimname_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_Dimname_Tensor, schema_str, "index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor")

// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_Dimname_Tensor::schema> create_index_fill_Dimname_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill_Dimname_Tensor::name, index_fill_Dimname_Tensor::overload_name)
      .typed<index_fill_Dimname_Tensor::schema>();
}

// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
at::Tensor index_fill_Dimname_Tensor::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {

    static auto op = create_index_fill_Dimname_Tensor_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
at::Tensor index_fill_Dimname_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {

    static auto op = create_index_fill_Dimname_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}
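
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// All eight index_fill variants above share one semantic: along `dim`, the
// slices named by `index` are overwritten with `value` (a Scalar, or a 0-dim
// Tensor in the *_Tensor overloads); the Dimname overloads merely resolve a
// named dimension to its position first. Functional forms copy, the
// trailing-underscore forms mutate self:
//
//   at::Tensor t     = at::zeros({3, 4});
//   at::Tensor index = at::tensor({1});
//   // fills row 1 with 5; `t` itself is left untouched
//   at::Tensor out = at::_ops::index_fill_int_Scalar::call(t, 0, index, 5);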

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add, name, "aten::scatter_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add, schema_str, "scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor")

// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_add::schema> create_scatter_add_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_add::name, scatter_add::overload_name)
      .typed<scatter_add::schema>();
}

// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_add::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {

    static auto op = create_scatter_add_typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_add::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {

    static auto op = create_scatter_add_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_, name, "aten::scatter_add_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_, schema_str, "scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)")

// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_add_::schema> create_scatter_add__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_add_::name, scatter_add_::overload_name)
      .typed<scatter_add_::schema>();
}

// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
at::Tensor & scatter_add_::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {

    static auto op = create_scatter_add__typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
at::Tensor & scatter_add_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {

    static auto op = create_scatter_add__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_out, name, "aten::scatter_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_out, schema_str, "scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)")

// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_add_out::schema> create_scatter_add_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_add_out::name, scatter_add_out::overload_name)
      .typed<scatter_add_out::schema>();
}

// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_add_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {

    static auto op = create_scatter_add_out_typed_handle();
    return op.call(self, dim, index, src, out);
}

// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_add_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {

    static auto op = create_scatter_add_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_dimname, name, "aten::scatter_add")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(scatter_add_dimname, schema_str, "scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor")

// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_add_dimname::schema> create_scatter_add_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_add_dimname::name, scatter_add_dimname::overload_name)
      .typed<scatter_add_dimname::schema>();
}

// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_add_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {

    static auto op = create_scatter_add_dimname_typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_add_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {

    static auto op = create_scatter_add_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}
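
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// scatter_add accumulates src into self at positions taken from index: for
// dim == 0, self[index[i][j]][j] += src[i][j]. Duplicate indices are summed,
// which is what distinguishes it from plain scatter. Sketch:
//
//   at::Tensor self  = at::zeros({3, 3});
//   at::Tensor src   = at::ones({2, 3});
//   at::Tensor index = at::zeros({2, 3}, at::kLong);  // everything lands in row 0
//   // row 0 of the result is 2 everywhere; rows 1 and 2 stay 0
//   at::Tensor out = at::_ops::scatter_add::call(self, 0, index, src);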

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma_, name, "aten::digamma_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma_, schema_str, "digamma_(Tensor(a!) self) -> Tensor(a!)")

// aten::digamma_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<digamma_::schema> create_digamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(digamma_::name, digamma_::overload_name)
      .typed<digamma_::schema>();
}

// aten::digamma_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & digamma_::call(at::Tensor & self) {

    static auto op = create_digamma__typed_handle();
    return op.call(self);
}

// aten::digamma_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & digamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {

    static auto op = create_digamma__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
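
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// digamma_ evaluates psi(x) = d/dx ln(Gamma(x)) elementwise in place. The
// Tensor(a!) annotation in the schema is what makes `call` take self by
// mutable reference and return the same tensor, allowing chained mutation:
//
//   at::Tensor t = at::ones({4});
//   at::_ops::digamma_::call(t);  // t now holds psi(1) = -euler_gamma ~ -0.5772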

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random__from, name, "aten::random_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random__from, overload_name, "from")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random__from, schema_str, "random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)")

// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<random__from::schema> create_random__from_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random__from::name, random__from::overload_name)
      .typed<random__from::schema>();
}

// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & random__from::call(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {

    static auto op = create_random__from_typed_handle();
    return op.call(self, from, to, generator);
}

// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & random__from::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {

    static auto op = create_random__from_typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random__to, name, "aten::random_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random__to, overload_name, "to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random__to, schema_str, "random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)")

// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<random__to::schema> create_random__to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random__to::name, random__to::overload_name)
      .typed<random__to::schema>();
}

// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & random__to::call(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {

    static auto op = create_random__to_typed_handle();
    return op.call(self, to, generator);
}

// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & random__to::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {

    static auto op = create_random__to_typed_handle();
    return op.redispatch(dispatchKeySet, self, to, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_, name, "aten::random_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_, schema_str, "random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)")

// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<random_::schema> create_random__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_::name, random_::overload_name)
      .typed<random_::schema>();
}

// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & random_::call(at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create_random__typed_handle();
    return op.call(self, generator);
}

// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & random_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create_random__typed_handle();
    return op.redispatch(dispatchKeySet, self, generator);
}
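
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// The three random_ overloads fill self in place with integers drawn
// uniformly from, respectively, [from, to), [0, to), and (with no bounds)
// roughly the full representable range of self's dtype (dtype-dependent for
// floating-point tensors). Sketch:
//
//   at::Tensor t = at::empty({8}, at::kLong);
//   at::_ops::random__from::call(t, /*from=*/0, /*to=*/10, c10::nullopt);
//   // every element of t is now in {0, ..., 9}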

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy_, name, "aten::cauchy_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy_, schema_str, "cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)")

// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cauchy_::schema> create_cauchy__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cauchy_::name, cauchy_::overload_name)
      .typed<cauchy_::schema>();
}

// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & cauchy_::call(at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {

    static auto op = create_cauchy__typed_handle();
    return op.call(self, median, sigma, generator);
}

// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & cauchy_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {

    static auto op = create_cauchy__typed_handle();
    return op.redispatch(dispatchKeySet, self, median, sigma, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal_, name, "aten::log_normal_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal_, schema_str, "log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)")

// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log_normal_::schema> create_log_normal__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_normal_::name, log_normal_::overload_name)
      .typed<log_normal_::schema>();
}

// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & log_normal_::call(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_log_normal__typed_handle();
    return op.call(self, mean, std, generator);
}

// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & log_normal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_log_normal__typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator);
}
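
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// cauchy_ and log_normal_ are in-place samplers in the same family: cauchy_
// draws from a Cauchy distribution with location `median` and scale `sigma`;
// log_normal_ draws exp(N(mean, std^2)), so `mean` and `std` parameterize
// the underlying normal, not the produced samples:
//
//   at::Tensor t = at::empty({1000});
//   at::_ops::log_normal_::call(t, /*mean=*/0.0, /*std=*/1.0, c10::nullopt);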

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross_out, name, "aten::cross")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross_out, schema_str, "cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cross_out::schema> create_cross_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cross_out::name, cross_out::overload_name)
      .typed<cross_out::schema>();
}

// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cross_out::call(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) {

    static auto op = create_cross_out_typed_handle();
    return op.call(self, other, dim, out);
}

// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cross_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) {

    static auto op = create_cross_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross, name, "aten::cross")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cross, schema_str, "cross(Tensor self, Tensor other, int? dim=None) -> Tensor")

// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cross::schema> create_cross_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cross::name, cross::overload_name)
      .typed<cross::schema>();
}

// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
at::Tensor cross::call(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {

    static auto op = create_cross_typed_handle();
    return op.call(self, other, dim);
}

// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
at::Tensor cross::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {

    static auto op = create_cross_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dim);
}
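
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// cross computes the 3-element vector cross product along `dim`; when dim is
// None it falls back to the first dimension of size 3 (new code generally
// prefers linalg_cross, which defaults to the last dimension). Sketch:
//
//   at::Tensor a = at::tensor({1.0, 0.0, 0.0});
//   at::Tensor b = at::tensor({0.0, 1.0, 0.0});
//   at::Tensor c = at::_ops::cross::call(a, b, c10::nullopt);  // {0, 0, 1}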

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Scalar_out, name, "aten::ne")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Scalar_out, schema_str, "ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ne_Scalar_out::schema> create_ne_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne_Scalar_out::name, ne_Scalar_out::overload_name)
      .typed<ne_Scalar_out::schema>();
}

// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ne_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_ne_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ne_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_ne_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Scalar, name, "aten::ne")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Scalar, schema_str, "ne.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ne_Scalar::schema> create_ne_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne_Scalar::name, ne_Scalar::overload_name)
      .typed<ne_Scalar::schema>();
}

// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor ne_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_ne_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor ne_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_ne_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Tensor_out, name, "aten::ne")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Tensor_out, schema_str, "ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ne_Tensor_out::schema> create_ne_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne_Tensor_out::name, ne_Tensor_out::overload_name)
      .typed<ne_Tensor_out::schema>();
}

// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ne_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_ne_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ne_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_ne_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Tensor, name, "aten::ne")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne_Tensor, schema_str, "ne.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ne_Tensor::schema> create_ne_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne_Tensor::name, ne_Tensor::overload_name)
      .typed<ne_Tensor::schema>();
}

// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ne_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_ne_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ne_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_ne_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne__Scalar, name, "aten::ne_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne__Scalar, schema_str, "ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ne__Scalar::schema> create_ne__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne__Scalar::name, ne__Scalar::overload_name)
      .typed<ne__Scalar::schema>();
}

// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & ne__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_ne__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & ne__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_ne__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne__Tensor, name, "aten::ne_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ne__Tensor, schema_str, "ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ne__Tensor::schema> create_ne__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne__Tensor::name, ne__Tensor::overload_name)
      .typed<ne__Tensor::schema>();
}

// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ne__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_ne__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ne__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_ne__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}
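
// [Illustrative sketch, editor-added -- not part of the torchgen output.]
// ne and its variants follow the standard comparison-op template that the ge
// family below repeats verbatim: the functional forms return a fresh kBool
// tensor, the .out forms write into a caller-provided tensor, and the
// in-place ne_/ge_ forms overwrite self (so self's dtype must be able to
// represent 0/1). Sketch:
//
//   at::Tensor a = at::tensor({1, 2, 3});
//   at::Tensor b = at::tensor({1, 0, 3});
//   at::Tensor m = at::_ops::ne_Tensor::call(a, b);  // kBool {false, true, false}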

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Scalar_out, name, "aten::ge")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Scalar_out, schema_str, "ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ge_Scalar_out::schema> create_ge_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge_Scalar_out::name, ge_Scalar_out::overload_name)
      .typed<ge_Scalar_out::schema>();
}

// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ge_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_ge_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ge_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_ge_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Scalar, name, "aten::ge")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Scalar, schema_str, "ge.Scalar(Tensor self, Scalar other) -> Tensor")

// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ge_Scalar::schema> create_ge_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge_Scalar::name, ge_Scalar::overload_name)
      .typed<ge_Scalar::schema>();
}

// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor ge_Scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_ge_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor ge_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_ge_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Tensor_out, name, "aten::ge")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Tensor_out, schema_str, "ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ge_Tensor_out::schema> create_ge_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge_Tensor_out::name, ge_Tensor_out::overload_name)
      .typed<ge_Tensor_out::schema>();
}

// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ge_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_ge_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ge_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_ge_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Tensor, name, "aten::ge")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge_Tensor, schema_str, "ge.Tensor(Tensor self, Tensor other) -> Tensor")

// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ge_Tensor::schema> create_ge_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge_Tensor::name, ge_Tensor::overload_name)
      .typed<ge_Tensor::schema>();
}

// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ge_Tensor::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_ge_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ge_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_ge_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge__Scalar, name, "aten::ge_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge__Scalar, schema_str, "ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")

// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ge__Scalar::schema> create_ge__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge__Scalar::name, ge__Scalar::overload_name)
      .typed<ge__Scalar::schema>();
}

// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & ge__Scalar::call(at::Tensor & self, const at::Scalar & other) {

    static auto op = create_ge__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & ge__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {

    static auto op = create_ge__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge__Tensor, name, "aten::ge_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge__Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ge__Tensor, schema_str, "ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ge__Tensor::schema> create_ge__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge__Tensor::name, ge__Tensor::overload_name)
      .typed<ge__Tensor::schema>();
}

// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ge__Tensor::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_ge__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ge__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_ge__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_gather_sparse_backward, name, "aten::_gather_sparse_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_gather_sparse_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_gather_sparse_backward, schema_str, "_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor")

// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_gather_sparse_backward::schema> create__gather_sparse_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_gather_sparse_backward::name, _gather_sparse_backward::overload_name)
      .typed<_gather_sparse_backward::schema>();
}

// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
at::Tensor _gather_sparse_backward::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {

    static auto op = create__gather_sparse_backward_typed_handle();
    return op.call(self, dim, index, grad);
}

// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
at::Tensor _gather_sparse_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {

    static auto op = create__gather_sparse_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, grad);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vander, name, "aten::linalg_vander")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vander, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_vander, schema_str, "linalg_vander(Tensor x, *, int? N=None) -> Tensor")

// aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vander::schema> create_linalg_vander_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vander::name, linalg_vander::overload_name)
      .typed<linalg_vander::schema>();
}

// aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor
at::Tensor linalg_vander::call(const at::Tensor & x, c10::optional<int64_t> N) {

    static auto op = create_linalg_vander_typed_handle();
    return op.call(x, N);
}

// aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor
at::Tensor linalg_vander::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, c10::optional<int64_t> N) {

    static auto op = create_linalg_vander_typed_handle();
    return op.redispatch(dispatchKeySet, x, N);
}
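
// Usage sketch (illustrative comment only), assuming the usual generated C++
// surface for this schema:
//
//   at::Tensor x = at::rand({4});
//   at::Tensor V = at::linalg_vander(x);  // N=None -> square Vandermonde matrix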

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapaxes, name, "aten::swapaxes")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapaxes, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapaxes, schema_str, "swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)")

// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<swapaxes::schema> create_swapaxes_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(swapaxes::name, swapaxes::overload_name)
      .typed<swapaxes::schema>();
}

// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
at::Tensor swapaxes::call(const at::Tensor & self, int64_t axis0, int64_t axis1) {

    static auto op = create_swapaxes_typed_handle();
    return op.call(self, axis0, axis1);
}

// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
at::Tensor swapaxes::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t axis0, int64_t axis1) {

    static auto op = create_swapaxes_typed_handle();
    return op.redispatch(dispatchKeySet, self, axis0, axis1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapaxes_, name, "aten::swapaxes_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapaxes_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(swapaxes_, schema_str, "swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)")

// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<swapaxes_::schema> create_swapaxes__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(swapaxes_::name, swapaxes_::overload_name)
      .typed<swapaxes_::schema>();
}

// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
at::Tensor & swapaxes_::call(at::Tensor & self, int64_t axis0, int64_t axis1) {

    static auto op = create_swapaxes__typed_handle();
    return op.call(self, axis0, axis1);
}

// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
at::Tensor & swapaxes_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t axis0, int64_t axis1) {

    static auto op = create_swapaxes__typed_handle();
    return op.redispatch(dispatchKeySet, self, axis0, axis1);
}
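
// Usage sketch (illustrative comment only): swapaxes is the NumPy-style alias
// for transpose and returns a view:
//
//   at::Tensor x = at::rand({2, 3, 4});
//   at::Tensor y = at::swapaxes(x, 0, 2);  // same result as x.transpose(0, 2)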

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_solve_out, name, "aten::cholesky_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_solve_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_solve_out, schema_str, "cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_solve_out::schema> create_cholesky_solve_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cholesky_solve_out::name, cholesky_solve_out::overload_name)
      .typed<cholesky_solve_out::schema>();
}

// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_solve_out::call(const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {

    static auto op = create_cholesky_solve_out_typed_handle();
    return op.call(self, input2, upper, out);
}

// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {

    static auto op = create_cholesky_solve_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, input2, upper, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_solve, name, "aten::cholesky_solve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_solve, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cholesky_solve, schema_str, "cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor")

// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_solve::schema> create_cholesky_solve_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cholesky_solve::name, cholesky_solve::overload_name)
      .typed<cholesky_solve::schema>();
}

// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
at::Tensor cholesky_solve::call(const at::Tensor & self, const at::Tensor & input2, bool upper) {

    static auto op = create_cholesky_solve_typed_handle();
    return op.call(self, input2, upper);
}

// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
at::Tensor cholesky_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper) {

    static auto op = create_cholesky_solve_typed_handle();
    return op.redispatch(dispatchKeySet, self, input2, upper);
}
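
// Usage sketch (illustrative comment only): cholesky_solve solves A x = b
// given a Cholesky factor of A (lower-triangular by default, upper=true for
// an upper factor):
//
//   at::Tensor A = at::rand({3, 3});
//   A = at::matmul(A, A.transpose(0, 1)) + 3 * at::eye(3);  // make A SPD
//   at::Tensor L = at::linalg_cholesky(A);
//   at::Tensor b = at::rand({3, 1});
//   at::Tensor x = at::cholesky_solve(b, L);  // solves A x = b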

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qr_Q, name, "aten::qr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qr_Q, overload_name, "Q")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qr_Q, schema_str, "qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)")

// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
static C10_NOINLINE c10::TypedOperatorHandle<qr_Q::schema> create_qr_Q_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(qr_Q::name, qr_Q::overload_name)
      .typed<qr_Q::schema>();
}

// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
::std::tuple<at::Tensor &,at::Tensor &> qr_Q::call(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {

    static auto op = create_qr_Q_typed_handle();
    return op.call(self, some, Q, R);
}

// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
::std::tuple<at::Tensor &,at::Tensor &> qr_Q::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {

    static auto op = create_qr_Q_typed_handle();
    return op.redispatch(dispatchKeySet, self, some, Q, R);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qr, name, "aten::qr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(qr, schema_str, "qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)")

// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
static C10_NOINLINE c10::TypedOperatorHandle<qr::schema> create_qr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(qr::name, qr::overload_name)
      .typed<qr::schema>();
}

// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
::std::tuple<at::Tensor,at::Tensor> qr::call(const at::Tensor & self, bool some) {

    static auto op = create_qr_typed_handle();
    return op.call(self, some);
}

// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
::std::tuple<at::Tensor,at::Tensor> qr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some) {

    static auto op = create_qr_typed_handle();
    return op.redispatch(dispatchKeySet, self, some);
}
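
// Usage sketch (illustrative comment only): the functional and out= variants
// are two overloads of the same schema name; in the generated C++ API the
// out arguments come first:
//
//   at::Tensor a = at::rand({4, 4});
//   auto [Q, R] = at::qr(a, /*some=*/true);  // functional -> qr::call
//   at::qr_out(Q, R, a, /*some=*/true);      // out variant -> qr_Q::call
//
// (at::qr is deprecated upstream in favor of at::linalg_qr.)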

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma_out, name, "aten::digamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma_out, schema_str, "digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<digamma_out::schema> create_digamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(digamma_out::name, digamma_out::overload_name)
      .typed<digamma_out::schema>();
}

// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & digamma_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_digamma_out_typed_handle();
    return op.call(self, out);
}

// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & digamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_digamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma, name, "aten::digamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(digamma, schema_str, "digamma(Tensor self) -> Tensor")

// aten::digamma(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<digamma::schema> create_digamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(digamma::name, digamma::overload_name)
      .typed<digamma::schema>();
}

// aten::digamma(Tensor self) -> Tensor
at::Tensor digamma::call(const at::Tensor & self) {

    static auto op = create_digamma_typed_handle();
    return op.call(self);
}

// aten::digamma(Tensor self) -> Tensor
at::Tensor digamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_digamma_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma_out, name, "aten::polygamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma_out, schema_str, "polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<polygamma_out::schema> create_polygamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(polygamma_out::name, polygamma_out::overload_name)
      .typed<polygamma_out::schema>();
}

// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & polygamma_out::call(int64_t n, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_polygamma_out_typed_handle();
    return op.call(n, self, out);
}

// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & polygamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_polygamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma, name, "aten::polygamma")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma, schema_str, "polygamma(int n, Tensor self) -> Tensor")

// aten::polygamma(int n, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<polygamma::schema> create_polygamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(polygamma::name, polygamma::overload_name)
      .typed<polygamma::schema>();
}

// aten::polygamma(int n, Tensor self) -> Tensor
at::Tensor polygamma::call(int64_t n, const at::Tensor & self) {

    static auto op = create_polygamma_typed_handle();
    return op.call(n, self);
}

// aten::polygamma(int n, Tensor self) -> Tensor
at::Tensor polygamma::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self) {

    static auto op = create_polygamma_typed_handle();
    return op.redispatch(dispatchKeySet, n, self);
}
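
// Usage sketch (illustrative comment only): polygamma is one of the few ops
// whose schema puts the non-Tensor argument first, and the C++ signature
// mirrors that -- except for the in-place variant just below, where self
// must lead:
//
//   at::Tensor x = at::rand({3}) + 0.5;
//   at::Tensor d0 = at::digamma(x);       // digamma(x) == polygamma(0, x)
//   at::Tensor d1 = at::polygamma(1, x);  // n before self
//   x.polygamma_(1);                      // in-place: (self, n)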

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma_, name, "aten::polygamma_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(polygamma_, schema_str, "polygamma_(Tensor(a!) self, int n) -> Tensor(a!)")

// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<polygamma_::schema> create_polygamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(polygamma_::name, polygamma_::overload_name)
      .typed<polygamma_::schema>();
}

// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
at::Tensor & polygamma_::call(at::Tensor & self, int64_t n) {

    static auto op = create_polygamma__typed_handle();
    return op.call(self, n);
}

// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
at::Tensor & polygamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n) {

    static auto op = create_polygamma__typed_handle();
    return op.redispatch(dispatchKeySet, self, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histc_out, name, "aten::histc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histc_out, schema_str, "histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<histc_out::schema> create_histc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histc_out::name, histc_out::overload_name)
      .typed<histc_out::schema>();
}

// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & histc_out::call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {

    static auto op = create_histc_out_typed_handle();
    return op.call(self, bins, min, max, out);
}

// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & histc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {

    static auto op = create_histc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, min, max, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histc, name, "aten::histc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(histc, schema_str, "histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor")

// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<histc::schema> create_histc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histc::name, histc::overload_name)
      .typed<histc::schema>();
}

// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
at::Tensor histc::call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {

    static auto op = create_histc_typed_handle();
    return op.call(self, bins, min, max);
}

// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
at::Tensor histc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {

    static auto op = create_histc_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, min, max);
}
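
// Usage sketch (illustrative comment only): with the default min=0, max=0,
// histc infers the bin range from the minimum and maximum of the input:
//
//   at::Tensor x = at::randn({100});
//   at::Tensor h = at::histc(x, /*bins=*/10);  // 10 bins over [x.min(), x.max()]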

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_bin_edges, name, "aten::_histogramdd_bin_edges")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_bin_edges, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_bin_edges, schema_str, "_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]")

// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_bin_edges::schema> create__histogramdd_bin_edges_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_bin_edges::name, _histogramdd_bin_edges::overload_name)
      .typed<_histogramdd_bin_edges::schema>();
}

// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
::std::vector<at::Tensor> _histogramdd_bin_edges::call(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {

    static auto op = create__histogramdd_bin_edges_typed_handle();
    return op.call(self, bins, range, weight, density);
}

// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
::std::vector<at::Tensor> _histogramdd_bin_edges::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {

    static auto op = create__histogramdd_bin_edges_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_tensors, name, "aten::_histogramdd_from_bin_tensors")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_tensors, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_tensors, schema_str, "_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor")

// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_from_bin_tensors::schema> create__histogramdd_from_bin_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_from_bin_tensors::name, _histogramdd_from_bin_tensors::overload_name)
      .typed<_histogramdd_from_bin_tensors::schema>();
}

// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
at::Tensor _histogramdd_from_bin_tensors::call(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {

    static auto op = create__histogramdd_from_bin_tensors_typed_handle();
    return op.call(self, bins, weight, density);
}

// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
at::Tensor _histogramdd_from_bin_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {

    static auto op = create__histogramdd_from_bin_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, weight, density);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter_out, name, "aten::nextafter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter_out, schema_str, "nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nextafter_out::schema> create_nextafter_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nextafter_out::name, nextafter_out::overload_name)
      .typed<nextafter_out::schema>();
}

// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nextafter_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_nextafter_out_typed_handle();
    return op.call(self, other, out);
}

// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nextafter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_nextafter_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter, name, "aten::nextafter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter, schema_str, "nextafter(Tensor self, Tensor other) -> Tensor")

// aten::nextafter(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nextafter::schema> create_nextafter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nextafter::name, nextafter::overload_name)
      .typed<nextafter::schema>();
}

// aten::nextafter(Tensor self, Tensor other) -> Tensor
at::Tensor nextafter::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_nextafter_typed_handle();
    return op.call(self, other);
}

// aten::nextafter(Tensor self, Tensor other) -> Tensor
at::Tensor nextafter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_nextafter_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter_, name, "aten::nextafter_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nextafter_, schema_str, "nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)")

// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nextafter_::schema> create_nextafter__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nextafter_::name, nextafter_::overload_name)
      .typed<nextafter_::schema>();
}

// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & nextafter_::call(at::Tensor & self, const at::Tensor & other) {

    static auto op = create_nextafter__typed_handle();
    return op.call(self, other);
}

// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & nextafter_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {

    static auto op = create_nextafter__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}
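
// Usage sketch (illustrative comment only): nextafter returns, element-wise,
// the next representable floating-point value after self toward other:
//
//   at::Tensor from = at::ones({1});
//   at::Tensor to = at::full({1}, 2.0);
//   at::Tensor n = at::nextafter(from, to);  // smallest float strictly > 1.0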

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(maximum, name, "aten::maximum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(maximum, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(maximum, schema_str, "maximum(Tensor self, Tensor other) -> Tensor")

// aten::maximum(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<maximum::schema> create_maximum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(maximum::name, maximum::overload_name)
      .typed<maximum::schema>();
}

// aten::maximum(Tensor self, Tensor other) -> Tensor
at::Tensor maximum::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_maximum_typed_handle();
    return op.call(self, other);
}

// aten::maximum(Tensor self, Tensor other) -> Tensor
at::Tensor maximum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_maximum_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(maximum_out, name, "aten::maximum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(maximum_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(maximum_out, schema_str, "maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<maximum_out::schema> create_maximum_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(maximum_out::name, maximum_out::overload_name)
      .typed<maximum_out::schema>();
}

// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & maximum_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_maximum_out_typed_handle();
    return op.call(self, other, out);
}

// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & maximum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_maximum_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(minimum, name, "aten::minimum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(minimum, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(minimum, schema_str, "minimum(Tensor self, Tensor other) -> Tensor")

// aten::minimum(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<minimum::schema> create_minimum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(minimum::name, minimum::overload_name)
      .typed<minimum::schema>();
}

// aten::minimum(Tensor self, Tensor other) -> Tensor
at::Tensor minimum::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_minimum_typed_handle();
    return op.call(self, other);
}

// aten::minimum(Tensor self, Tensor other) -> Tensor
at::Tensor minimum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_minimum_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(minimum_out, name, "aten::minimum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(minimum_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(minimum_out, schema_str, "minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<minimum_out::schema> create_minimum_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(minimum_out::name, minimum_out::overload_name)
      .typed<minimum_out::schema>();
}

// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & minimum_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_minimum_out_typed_handle();
    return op.call(self, other, out);
}

// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & minimum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_minimum_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}
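
// Usage sketch (illustrative comment only): maximum/minimum are the
// broadcasting, NaN-propagating element-wise pair:
//
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({3});      // broadcasts against a
//   at::Tensor m = at::maximum(a, b);  // element-wise max, shape {2, 3}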

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile, name, "aten::quantile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile, schema_str, "quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor")

// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantile::schema> create_quantile_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantile::name, quantile::overload_name)
      .typed<quantile::schema>();
}

// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor quantile::call(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {

    static auto op = create_quantile_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation);
}

// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor quantile::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {

    static auto op = create_quantile_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_out, name, "aten::quantile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_out, schema_str, "quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)")

// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<quantile_out::schema> create_quantile_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantile_out::name, quantile_out::overload_name)
      .typed<quantile_out::schema>();
}

// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantile_out::call(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {

    static auto op = create_quantile_out_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation, out);
}

// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantile_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {

    static auto op = create_quantile_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_scalar, name, "aten::quantile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_scalar, overload_name, "scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_scalar, schema_str, "quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor")

// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantile_scalar::schema> create_quantile_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantile_scalar::name, quantile_scalar::overload_name)
      .typed<quantile_scalar::schema>();
}

// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor quantile_scalar::call(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {

    static auto op = create_quantile_scalar_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation);
}

// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor quantile_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {

    static auto op = create_quantile_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_scalar_out, name, "aten::quantile")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_scalar_out, overload_name, "scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantile_scalar_out, schema_str, "quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)")

// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<quantile_scalar_out::schema> create_quantile_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantile_scalar_out::name, quantile_scalar_out::overload_name)
      .typed<quantile_scalar_out::schema>();
}

// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantile_scalar_out::call(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {

    static auto op = create_quantile_scalar_out_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation, out);
}

// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantile_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {

    static auto op = create_quantile_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out);
}
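
// Usage sketch (illustrative comment only): the scalar overloads are a
// convenience over the Tensor-q forms; interpolation picks how quantiles
// between data points are computed ('linear', 'lower', 'higher', 'nearest',
// 'midpoint'):
//
//   at::Tensor x = at::randn({100});
//   at::Tensor med = at::quantile(x, /*q=*/0.5);  // flattened median, 'linear'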

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(msort_out, name, "aten::msort")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(msort_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(msort_out, schema_str, "msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<msort_out::schema> create_msort_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(msort_out::name, msort_out::overload_name)
      .typed<msort_out::schema>();
}

// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & msort_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_msort_out_typed_handle();
    return op.call(self, out);
}

// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & msort_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_msort_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(msort, name, "aten::msort")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(msort, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(msort, schema_str, "msort(Tensor self) -> Tensor")

// aten::msort(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<msort::schema> create_msort_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(msort::name, msort::overload_name)
      .typed<msort::schema>();
}

// aten::msort(Tensor self) -> Tensor
at::Tensor msort::call(const at::Tensor & self) {

    static auto op = create_msort_typed_handle();
    return op.call(self);
}

// aten::msort(Tensor self) -> Tensor
at::Tensor msort::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_msort_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
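
// Usage sketch (illustrative comment only): msort sorts along dim 0 and is
// equivalent to std::get<0>(at::sort(self, /*dim=*/0)):
//
//   at::Tensor x = at::rand({4, 3});
//   at::Tensor s = at::msort(x);  // each column sorted ascending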

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort, name, "aten::argsort")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort, schema_str, "argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor")

// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<argsort::schema> create_argsort_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argsort::name, argsort::overload_name)
      .typed<argsort::schema>();
}

// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
at::Tensor argsort::call(const at::Tensor & self, int64_t dim, bool descending) {

    static auto op = create_argsort_typed_handle();
    return op.call(self, dim, descending);
}

// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
at::Tensor argsort::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending) {

    static auto op = create_argsort_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_stable, name, "aten::argsort")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_stable, overload_name, "stable")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_stable, schema_str, "argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor")

// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<argsort_stable::schema> create_argsort_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argsort_stable::name, argsort_stable::overload_name)
      .typed<argsort_stable::schema>();
}

// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
at::Tensor argsort_stable::call(const at::Tensor & self, bool stable, int64_t dim, bool descending) {

    static auto op = create_argsort_stable_typed_handle();
    return op.call(self, stable, dim, descending);
}

// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
at::Tensor argsort_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim, bool descending) {

    static auto op = create_argsort_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_dimname, name, "aten::argsort")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_dimname, overload_name, "dimname")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_dimname, schema_str, "argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor")

// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<argsort_dimname::schema> create_argsort_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argsort_dimname::name, argsort_dimname::overload_name)
      .typed<argsort_dimname::schema>();
}

// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
at::Tensor argsort_dimname::call(const at::Tensor & self, at::Dimname dim, bool descending) {

    static auto op = create_argsort_dimname_typed_handle();
    return op.call(self, dim, descending);
}

// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
at::Tensor argsort_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending) {

    static auto op = create_argsort_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending);
}
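
// Usage sketch (illustrative comment only): the stable overload's arguments
// are keyword-only in the schema, which the generated C++ API exposes as
// plain positional parameters:
//
//   at::Tensor x = at::rand({5});
//   at::Tensor i1 = at::argsort(x);  // default overload, dim=-1
//   at::Tensor i2 = at::argsort(x, /*stable=*/true, /*dim=*/-1, /*descending=*/false);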

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(topk_values, name, "aten::topk")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(topk_values, overload_name, "values")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(topk_values, schema_str, "topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")

// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<topk_values::schema> create_topk_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(topk_values::name, topk_values::overload_name)
      .typed<topk_values::schema>();
}

// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> topk_values::call(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_topk_values_typed_handle();
    return op.call(self, k, dim, largest, sorted, values, indices);
}

// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> topk_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {

    static auto op = create_topk_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, k, dim, largest, sorted, values, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(topk, name, "aten::topk")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(topk, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(topk, schema_str, "topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)")

// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<topk::schema> create_topk_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(topk::name, topk::overload_name)
      .typed<topk::schema>();
}

// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> topk::call(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {

    static auto op = create_topk_typed_handle();
    return op.call(self, k, dim, largest, sorted);
}

// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> topk::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {

    static auto op = create_topk_typed_handle();
    return op.redispatch(dispatchKeySet, self, k, dim, largest, sorted);
}
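
// Usage sketch (illustrative comment only):
//
//   at::Tensor x = at::rand({10});
//   auto [values, indices] = at::topk(x, /*k=*/3);  // 3 largest, sorted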

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold_backward, name, "aten::unfold_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold_backward, schema_str, "unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor")

// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<unfold_backward::schema> create_unfold_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unfold_backward::name, unfold_backward::overload_name)
      .typed<unfold_backward::schema>();
}

// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
at::Tensor unfold_backward::call(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {

    static auto op = create_unfold_backward_typed_handle();
    return op.call(grad_in, input_sizes, dim, size, step);
}

// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
at::Tensor unfold_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {

    static auto op = create_unfold_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_, name, "aten::normal_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_, schema_str, "normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)")

// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_::schema> create_normal__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(normal_::name, normal_::overload_name)
      .typed<normal_::schema>();
}

// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & normal_::call(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_normal__typed_handle();
    return op.call(self, mean, std, generator);
}

// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & normal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_normal__typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_functional, name, "aten::normal_functional")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_functional, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_functional, schema_str, "normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor")

// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<normal_functional::schema> create_normal_functional_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(normal_functional::name, normal_functional::overload_name)
      .typed<normal_functional::schema>();
}

// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
at::Tensor normal_functional::call(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_normal_functional_typed_handle();
    return op.call(self, mean, std, generator);
}

// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
at::Tensor normal_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_normal_functional_typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator);
}
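
// Note (illustrative comment only): normal_functional is the out-of-place
// counterpart of normal_ generated for the functionalization pass -- same
// sampling, but it returns a new tensor instead of mutating self:
//
//   at::Tensor x = at::empty({4});
//   x.normal_(/*mean=*/0.0, /*std=*/1.0);  // in-place fill with N(0, 1)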

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_float_out, name, "aten::normal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_float_out, overload_name, "Tensor_float_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_float_out, schema_str, "normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_Tensor_float_out::schema> create_normal_Tensor_float_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(normal_Tensor_float_out::name, normal_Tensor_float_out::overload_name)
      .typed<normal_Tensor_float_out::schema>();
}

// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_Tensor_float_out::call(const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_normal_Tensor_float_out_typed_handle();
    return op.call(mean, std, generator, out);
}

// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_Tensor_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_normal_Tensor_float_out_typed_handle();
    return op.redispatch(dispatchKeySet, mean, std, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_float, name, "aten::normal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_float, overload_name, "Tensor_float")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_float, schema_str, "normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor")

// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<normal_Tensor_float::schema> create_normal_Tensor_float_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(normal_Tensor_float::name, normal_Tensor_float::overload_name)
      .typed<normal_Tensor_float::schema>();
}

// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
at::Tensor normal_Tensor_float::call(const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_normal_Tensor_float_typed_handle();
    return op.call(mean, std, generator);
}

// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
at::Tensor normal_Tensor_float::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_normal_Tensor_float_typed_handle();
    return op.redispatch(dispatchKeySet, mean, std, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_Tensor_out, name, "aten::normal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_Tensor_out, overload_name, "float_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_Tensor_out, schema_str, "normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_float_Tensor_out::schema> create_normal_float_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(normal_float_Tensor_out::name, normal_float_Tensor_out::overload_name)
      .typed<normal_float_Tensor_out::schema>();
}

// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_float_Tensor_out::call(double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_normal_float_Tensor_out_typed_handle();
    return op.call(mean, std, generator, out);
}

// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_float_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_normal_float_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, mean, std, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_Tensor, name, "aten::normal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_Tensor, overload_name, "float_Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_Tensor, schema_str, "normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor")

// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<normal_float_Tensor::schema> create_normal_float_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(normal_float_Tensor::name, normal_float_Tensor::overload_name)
      .typed<normal_float_Tensor::schema>();
}

// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
at::Tensor normal_float_Tensor::call(double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {

    static auto op = create_normal_float_Tensor_typed_handle();
    return op.call(mean, std, generator);
}

// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
at::Tensor normal_float_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {

    static auto op = create_normal_float_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, mean, std, generator);
}
8798
8799STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_Tensor_out, name, "aten::normal")
8800STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_Tensor_out, overload_name, "Tensor_Tensor_out")
8801STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_Tensor_out, schema_str, "normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
8802
8803// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
8804static C10_NOINLINE c10::TypedOperatorHandle<normal_Tensor_Tensor_out::schema> create_normal_Tensor_Tensor_out_typed_handle() {
8805 return c10::Dispatcher::singleton()
8806 .findSchemaOrThrow(normal_Tensor_Tensor_out::name, normal_Tensor_Tensor_out::overload_name)
8807 .typed<normal_Tensor_Tensor_out::schema>();
8808}
8809
8810// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
8811at::Tensor & normal_Tensor_Tensor_out::call(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {
8812
8813 static auto op = create_normal_Tensor_Tensor_out_typed_handle();
8814 return op.call(mean, std, generator, out);
8815}
8816
8817// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
8818at::Tensor & normal_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {
8819
8820 static auto op = create_normal_Tensor_Tensor_out_typed_handle();
8821 return op.redispatch(dispatchKeySet, mean, std, generator, out);
8822}
8823
8824STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_Tensor, name, "aten::normal")
8825STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_Tensor, overload_name, "Tensor_Tensor")
8826STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_Tensor_Tensor, schema_str, "normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor")
8827
8828// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
8829static C10_NOINLINE c10::TypedOperatorHandle<normal_Tensor_Tensor::schema> create_normal_Tensor_Tensor_typed_handle() {
8830 return c10::Dispatcher::singleton()
8831 .findSchemaOrThrow(normal_Tensor_Tensor::name, normal_Tensor_Tensor::overload_name)
8832 .typed<normal_Tensor_Tensor::schema>();
8833}
8834
8835// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
8836at::Tensor normal_Tensor_Tensor::call(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
8837
8838 static auto op = create_normal_Tensor_Tensor_typed_handle();
8839 return op.call(mean, std, generator);
8840}
8841
8842// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
8843at::Tensor normal_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
8844
8845 static auto op = create_normal_Tensor_Tensor_typed_handle();
8846 return op.redispatch(dispatchKeySet, mean, std, generator);
8847}
8848
8849STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_float, name, "aten::normal")
8850STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_float, overload_name, "float_float")
8851STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_float, schema_str, "normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
8852
8853// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8854static C10_NOINLINE c10::TypedOperatorHandle<normal_float_float::schema> create_normal_float_float_typed_handle() {
8855 return c10::Dispatcher::singleton()
8856 .findSchemaOrThrow(normal_float_float::name, normal_float_float::overload_name)
8857 .typed<normal_float_float::schema>();
8858}
8859
8860// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8861at::Tensor normal_float_float::call(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8862
8863 static auto op = create_normal_float_float_typed_handle();
8864 return op.call(mean, std, size, generator, dtype, layout, device, pin_memory);
8865}
8866
8867// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8868at::Tensor normal_float_float::redispatch(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8869
8870 static auto op = create_normal_float_float_typed_handle();
8871 return op.redispatch(dispatchKeySet, mean, std, size, generator, dtype, layout, device, pin_memory);
8872}
8873
8874STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_float_out, name, "aten::normal")
8875STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_float_out, overload_name, "float_float_out")
8876STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_float_float_out, schema_str, "normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
8877
8878// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
8879static C10_NOINLINE c10::TypedOperatorHandle<normal_float_float_out::schema> create_normal_float_float_out_typed_handle() {
8880 return c10::Dispatcher::singleton()
8881 .findSchemaOrThrow(normal_float_float_out::name, normal_float_float_out::overload_name)
8882 .typed<normal_float_float_out::schema>();
8883}
8884
8885// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
8886at::Tensor & normal_float_float_out::call(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
8887
8888 static auto op = create_normal_float_float_out_typed_handle();
8889 return op.call(mean, std, size, generator, out);
8890}
8891
8892// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
8893at::Tensor & normal_float_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
8894
8895 static auto op = create_normal_float_float_out_typed_handle();
8896 return op.redispatch(dispatchKeySet, mean, std, size, generator, out);
8897}
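
// Illustrative sketch (editor's note, not generated output): normal.float_float
// is the factory-style overload; it takes a shape plus the unpacked
// TensorOptions fields instead of an input tensor. A typical caller goes
// through the public at:: API, which packs the options before reaching this
// wrapper:
//
//   #include <ATen/ATen.h>
//
//   // 2x3 tensor of samples from N(0, 2); dtype taken from the options.
//   at::Tensor t = at::normal(/*mean=*/0.0, /*std=*/2.0, /*size=*/{2, 3},
//                             /*generator=*/c10::nullopt,
//                             at::TensorOptions().dtype(at::kFloat));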

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias, name, "aten::alias")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(alias, schema_str, "alias(Tensor(a) self) -> Tensor(a)")

// aten::alias(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<alias::schema> create_alias_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(alias::name, alias::overload_name)
      .typed<alias::schema>();
}

// aten::alias(Tensor(a) self) -> Tensor(a)
at::Tensor alias::call(const at::Tensor & self) {

    static auto op = create_alias_typed_handle();
    return op.call(self);
}

// aten::alias(Tensor(a) self) -> Tensor(a)
at::Tensor alias::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_alias_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
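
// Illustrative sketch (editor's note, not generated output): the (a)
// annotation in "alias(Tensor(a) self) -> Tensor(a)" records that the result
// aliases the input, so it behaves as a view rather than a copy:
//
//   #include <ATen/ATen.h>
//
//   at::Tensor t = at::randn({3});
//   at::Tensor a = at::alias(t);
//   bool shares = a.data_ptr() == t.data_ptr();  // true: same storage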

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_Scalar, name, "aten::_foreach_sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_Scalar, schema_str, "_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")

// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_Scalar::schema> create__foreach_sub_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub_Scalar::name, _foreach_sub_Scalar::overload_name)
      .typed<_foreach_sub_Scalar::schema>();
}

// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_Scalar::call(at::TensorList self, const at::Scalar & scalar) {

    static auto op = create__foreach_sub_Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {

    static auto op = create__foreach_sub_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__Scalar, name, "aten::_foreach_sub_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__Scalar, schema_str, "_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")

// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub__Scalar::schema> create__foreach_sub__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub__Scalar::name, _foreach_sub__Scalar::overload_name)
      .typed<_foreach_sub__Scalar::schema>();
}

// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_sub__Scalar::call(at::TensorList self, const at::Scalar & scalar) {

    static auto op = create__foreach_sub__Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_sub__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {

    static auto op = create__foreach_sub__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_Scalar, name, "aten::_foreach_maximum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_Scalar, schema_str, "_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")

// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_Scalar::schema> create__foreach_maximum_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum_Scalar::name, _foreach_maximum_Scalar::overload_name)
      .typed<_foreach_maximum_Scalar::schema>();
}

// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_Scalar::call(at::TensorList self, const at::Scalar & scalar) {

    static auto op = create__foreach_maximum_Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {

    static auto op = create__foreach_maximum_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__Scalar, name, "aten::_foreach_maximum_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__Scalar, schema_str, "_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")

// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum__Scalar::schema> create__foreach_maximum__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum__Scalar::name, _foreach_maximum__Scalar::overload_name)
      .typed<_foreach_maximum__Scalar::schema>();
}

// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_maximum__Scalar::call(at::TensorList self, const at::Scalar & scalar) {

    static auto op = create__foreach_maximum__Scalar_typed_handle();
    return op.call(self, scalar);
}

// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_maximum__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {

    static auto op = create__foreach_maximum__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_List, name, "aten::_foreach_sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_List, schema_str, "_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]")

// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_List::schema> create__foreach_sub_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub_List::name, _foreach_sub_List::overload_name)
      .typed<_foreach_sub_List::schema>();
}

// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_List::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {

    static auto op = create__foreach_sub_List_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {

    static auto op = create__foreach_sub_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__List, name, "aten::_foreach_sub_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__List, schema_str, "_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()")

// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub__List::schema> create__foreach_sub__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub__List::name, _foreach_sub__List::overload_name)
      .typed<_foreach_sub__List::schema>();
}

// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
void _foreach_sub__List::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {

    static auto op = create__foreach_sub__List_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
void _foreach_sub__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {

    static auto op = create__foreach_sub__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_List, name, "aten::_foreach_maximum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_List, schema_str, "_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]")

// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_List::schema> create__foreach_maximum_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum_List::name, _foreach_maximum_List::overload_name)
      .typed<_foreach_maximum_List::schema>();
}

// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_List::call(at::TensorList self, at::TensorList other) {

    static auto op = create__foreach_maximum_List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {

    static auto op = create__foreach_maximum_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__List, name, "aten::_foreach_maximum_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__List, overload_name, "List")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__List, schema_str, "_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()")

// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum__List::schema> create__foreach_maximum__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum__List::name, _foreach_maximum__List::overload_name)
      .typed<_foreach_maximum__List::schema>();
}

// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_maximum__List::call(at::TensorList self, at::TensorList other) {

    static auto op = create__foreach_maximum__List_typed_handle();
    return op.call(self, other);
}

// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_maximum__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {

    static auto op = create__foreach_maximum__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}
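
// Illustrative sketch (editor's note, not generated output): the _foreach_*
// binary ops take whole tensor lists (and may use fused multi-tensor
// kernels); the trailing-underscore variants mutate the list elements in
// place and return void, matching the "-> ()" schemas above:
//
//   #include <ATen/ATen.h>
//
//   std::vector<at::Tensor> params = {at::randn({4}), at::randn({4})};
//   std::vector<at::Tensor> grads  = {at::randn({4}), at::randn({4})};
//   // Out-of-place: returns a new vector, params untouched.
//   std::vector<at::Tensor> stepped = at::_foreach_sub(params, grads, /*alpha=*/0.1);
//   // In-place: params[i] -= 0.1 * grads[i] for every i.
//   at::_foreach_sub_(params, grads, /*alpha=*/0.1);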

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_ScalarList, name, "aten::_foreach_sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_ScalarList, schema_str, "_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")

// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_ScalarList::schema> create__foreach_sub_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub_ScalarList::name, _foreach_sub_ScalarList::overload_name)
      .typed<_foreach_sub_ScalarList::schema>();
}

// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

    static auto op = create__foreach_sub_ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

    static auto op = create__foreach_sub_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__ScalarList, name, "aten::_foreach_sub_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub__ScalarList, schema_str, "_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")

// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub__ScalarList::schema> create__foreach_sub__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub__ScalarList::name, _foreach_sub__ScalarList::overload_name)
      .typed<_foreach_sub__ScalarList::schema>();
}

// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_sub__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

    static auto op = create__foreach_sub__ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_sub__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

    static auto op = create__foreach_sub__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_ScalarList, name, "aten::_foreach_maximum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_ScalarList, schema_str, "_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")

// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_ScalarList::schema> create__foreach_maximum_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum_ScalarList::name, _foreach_maximum_ScalarList::overload_name)
      .typed<_foreach_maximum_ScalarList::schema>();
}

// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

    static auto op = create__foreach_maximum_ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

    static auto op = create__foreach_maximum_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__ScalarList, name, "aten::_foreach_maximum_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__ScalarList, overload_name, "ScalarList")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum__ScalarList, schema_str, "_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")

// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum__ScalarList::schema> create__foreach_maximum__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum__ScalarList::name, _foreach_maximum__ScalarList::overload_name)
      .typed<_foreach_maximum__ScalarList::schema>();
}

// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_maximum__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

    static auto op = create__foreach_maximum__ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_maximum__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {

    static auto op = create__foreach_maximum__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos, name, "aten::_foreach_acos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos, schema_str, "_foreach_acos(Tensor[] self) -> Tensor[]")

// aten::_foreach_acos(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_acos::schema> create__foreach_acos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_acos::name, _foreach_acos::overload_name)
      .typed<_foreach_acos::schema>();
}

// aten::_foreach_acos(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_acos::call(at::TensorList self) {

    static auto op = create__foreach_acos_typed_handle();
    return op.call(self);
}

// aten::_foreach_acos(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_acos::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_acos_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos_, name, "aten::_foreach_acos_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos_, schema_str, "_foreach_acos_(Tensor(a!)[] self) -> ()")

// aten::_foreach_acos_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_acos_::schema> create__foreach_acos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_acos_::name, _foreach_acos_::overload_name)
      .typed<_foreach_acos_::schema>();
}

// aten::_foreach_acos_(Tensor(a!)[] self) -> ()
void _foreach_acos_::call(at::TensorList self) {

    static auto op = create__foreach_acos__typed_handle();
    return op.call(self);
}

// aten::_foreach_acos_(Tensor(a!)[] self) -> ()
void _foreach_acos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_acos__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan, name, "aten::_foreach_atan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan, schema_str, "_foreach_atan(Tensor[] self) -> Tensor[]")

// aten::_foreach_atan(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_atan::schema> create__foreach_atan_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_atan::name, _foreach_atan::overload_name)
      .typed<_foreach_atan::schema>();
}

// aten::_foreach_atan(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_atan::call(at::TensorList self) {

    static auto op = create__foreach_atan_typed_handle();
    return op.call(self);
}

// aten::_foreach_atan(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_atan::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_atan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan_, name, "aten::_foreach_atan_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan_, schema_str, "_foreach_atan_(Tensor(a!)[] self) -> ()")

// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_atan_::schema> create__foreach_atan__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_atan_::name, _foreach_atan_::overload_name)
      .typed<_foreach_atan_::schema>();
}

// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
void _foreach_atan_::call(at::TensorList self) {

    static auto op = create__foreach_atan__typed_handle();
    return op.call(self);
}

// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
void _foreach_atan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_atan__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil, name, "aten::_foreach_ceil")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil, schema_str, "_foreach_ceil(Tensor[] self) -> Tensor[]")

// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_ceil::schema> create__foreach_ceil_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_ceil::name, _foreach_ceil::overload_name)
      .typed<_foreach_ceil::schema>();
}

// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_ceil::call(at::TensorList self) {

    static auto op = create__foreach_ceil_typed_handle();
    return op.call(self);
}

// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_ceil::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_ceil_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil_, name, "aten::_foreach_ceil_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil_, schema_str, "_foreach_ceil_(Tensor(a!)[] self) -> ()")

// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_ceil_::schema> create__foreach_ceil__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_ceil_::name, _foreach_ceil_::overload_name)
      .typed<_foreach_ceil_::schema>();
}

// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
void _foreach_ceil_::call(at::TensorList self) {

    static auto op = create__foreach_ceil__typed_handle();
    return op.call(self);
}

// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
void _foreach_ceil_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_ceil__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf, name, "aten::_foreach_erf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf, schema_str, "_foreach_erf(Tensor[] self) -> Tensor[]")

// aten::_foreach_erf(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erf::schema> create__foreach_erf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erf::name, _foreach_erf::overload_name)
      .typed<_foreach_erf::schema>();
}

// aten::_foreach_erf(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_erf::call(at::TensorList self) {

    static auto op = create__foreach_erf_typed_handle();
    return op.call(self);
}

// aten::_foreach_erf(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_erf::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_erf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf_, name, "aten::_foreach_erf_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf_, schema_str, "_foreach_erf_(Tensor(a!)[] self) -> ()")

// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erf_::schema> create__foreach_erf__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erf_::name, _foreach_erf_::overload_name)
      .typed<_foreach_erf_::schema>();
}

// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
void _foreach_erf_::call(at::TensorList self) {

    static auto op = create__foreach_erf__typed_handle();
    return op.call(self);
}

// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
void _foreach_erf_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_erf__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2, name, "aten::_foreach_log2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2, schema_str, "_foreach_log2(Tensor[] self) -> Tensor[]")

// aten::_foreach_log2(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log2::schema> create__foreach_log2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log2::name, _foreach_log2::overload_name)
      .typed<_foreach_log2::schema>();
}

// aten::_foreach_log2(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log2::call(at::TensorList self) {

    static auto op = create__foreach_log2_typed_handle();
    return op.call(self);
}

// aten::_foreach_log2(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log2::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_log2_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2_, name, "aten::_foreach_log2_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2_, schema_str, "_foreach_log2_(Tensor(a!)[] self) -> ()")

// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log2_::schema> create__foreach_log2__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log2_::name, _foreach_log2_::overload_name)
      .typed<_foreach_log2_::schema>();
}

// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
void _foreach_log2_::call(at::TensorList self) {

    static auto op = create__foreach_log2__typed_handle();
    return op.call(self);
}

// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
void _foreach_log2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {

    static auto op = create__foreach_log2__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
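
// Illustrative sketch (editor's note, not generated output): the unary
// _foreach_* ops follow the same out-of-place/in-place pairing; for instance:
//
//   #include <ATen/ATen.h>
//
//   std::vector<at::Tensor> xs = {at::rand({2}) + 1, at::rand({2}) + 1};
//   std::vector<at::Tensor> ys = at::_foreach_log2(xs);  // new tensors
//   at::_foreach_log2_(xs);                              // mutates xs in place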

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Tensor, name, "aten::bucketize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Tensor, overload_name, "Tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Tensor, schema_str, "bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor")

// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bucketize_Tensor::schema> create_bucketize_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bucketize_Tensor::name, bucketize_Tensor::overload_name)
      .typed<bucketize_Tensor::schema>();
}

// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
at::Tensor bucketize_Tensor::call(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {

    static auto op = create_bucketize_Tensor_typed_handle();
    return op.call(self, boundaries, out_int32, right);
}

// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
at::Tensor bucketize_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {

    static auto op = create_bucketize_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, boundaries, out_int32, right);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Tensor_out, name, "aten::bucketize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Tensor_out, overload_name, "Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Tensor_out, schema_str, "bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)")

// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bucketize_Tensor_out::schema> create_bucketize_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bucketize_Tensor_out::name, bucketize_Tensor_out::overload_name)
      .typed<bucketize_Tensor_out::schema>();
}

// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bucketize_Tensor_out::call(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {

    static auto op = create_bucketize_Tensor_out_typed_handle();
    return op.call(self, boundaries, out_int32, right, out);
}

// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bucketize_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {

    static auto op = create_bucketize_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, boundaries, out_int32, right, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Scalar, name, "aten::bucketize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Scalar, overload_name, "Scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Scalar, schema_str, "bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor")

// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bucketize_Scalar::schema> create_bucketize_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bucketize_Scalar::name, bucketize_Scalar::overload_name)
      .typed<bucketize_Scalar::schema>();
}

// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
at::Tensor bucketize_Scalar::call(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {

    static auto op = create_bucketize_Scalar_typed_handle();
    return op.call(self, boundaries, out_int32, right);
}

// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
at::Tensor bucketize_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {

    static auto op = create_bucketize_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, boundaries, out_int32, right);
}
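
// Illustrative sketch (editor's note, not generated output): bucketize maps
// each input value to an index into a sorted 1-D boundaries tensor; out_int32
// selects int32 instead of int64 indices, and right flips which side of a
// boundary counts as inside the bucket:
//
//   #include <ATen/ATen.h>
//
//   at::Tensor boundaries = at::tensor({1.0, 3.0, 5.0});
//   at::Tensor values     = at::tensor({0.5, 3.0, 9.0});
//   at::Tensor idx = at::bucketize(values, boundaries,
//                                  /*out_int32=*/false, /*right=*/false);
//   // idx is [0, 1, 3]: with right=false a value equal to a boundary
//   // (here 3.0) falls in the bucket that ends at that boundary.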

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_out, name, "aten::mse_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss_out, schema_str, "mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mse_loss_out::schema> create_mse_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mse_loss_out::name, mse_loss_out::overload_name)
      .typed<mse_loss_out::schema>();
}

// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mse_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {

    static auto op = create_mse_loss_out_typed_handle();
    return op.call(self, target, reduction, out);
}

// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mse_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {

    static auto op = create_mse_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss, name, "aten::mse_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mse_loss, schema_str, "mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor")

// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mse_loss::schema> create_mse_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mse_loss::name, mse_loss::overload_name)
      .typed<mse_loss::schema>();
}

// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor mse_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {

    static auto op = create_mse_loss_typed_handle();
    return op.call(self, target, reduction);
}

// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor mse_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {

    static auto op = create_mse_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(l1_loss, name, "aten::l1_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(l1_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(l1_loss, schema_str, "l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor")

// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<l1_loss::schema> create_l1_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(l1_loss::name, l1_loss::overload_name)
      .typed<l1_loss::schema>();
}

// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor l1_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {

    static auto op = create_l1_loss_typed_handle();
    return op.call(self, target, reduction);
}

// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor l1_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {

    static auto op = create_l1_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction);
}
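
// Illustrative sketch (editor's note, not generated output): the
// "int reduction=Mean" default in these loss schemas refers to the
// at::Reduction enum (None=0, Mean=1, Sum=2), passed as a plain int64_t
// at this layer:
//
//   #include <ATen/ATen.h>
//   #include <ATen/core/Reduction.h>
//
//   at::Tensor pred   = at::randn({8});
//   at::Tensor target = at::randn({8});
//   at::Tensor scalar_loss = at::mse_loss(pred, target, at::Reduction::Mean);
//   at::Tensor per_elem    = at::mse_loss(pred, target, at::Reduction::None);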

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_nd, name, "aten::nll_loss_nd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_nd, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss_nd, schema_str, "nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor")

// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_nd::schema> create_nll_loss_nd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_nd::name, nll_loss_nd::overload_name)
      .typed<nll_loss_nd::schema>();
}

// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
at::Tensor nll_loss_nd::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {

    static auto op = create_nll_loss_nd_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
at::Tensor nll_loss_nd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {

    static auto op = create_nll_loss_nd_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_out, name, "aten::nll_loss2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_out, schema_str, "nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)")

// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_out::schema> create_nll_loss2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_out::name, nll_loss2d_out::overload_name)
      .typed<nll_loss2d_out::schema>();
}

// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nll_loss2d_out::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {

    static auto op = create_nll_loss2d_out_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index, out);
}

// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nll_loss2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {

    static auto op = create_nll_loss2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d, name, "aten::nll_loss2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d, schema_str, "nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor")

// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d::schema> create_nll_loss2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d::name, nll_loss2d::overload_name)
      .typed<nll_loss2d::schema>();
}

// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
at::Tensor nll_loss2d::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {

    static auto op = create_nll_loss2d_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
at::Tensor nll_loss2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {

    static auto op = create_nll_loss2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_forward_output, name, "aten::nll_loss2d_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_forward_output, overload_name, "output")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_forward_output, schema_str, "nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))")

// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_forward_output::schema> create_nll_loss2d_forward_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_forward_output::name, nll_loss2d_forward_output::overload_name)
      .typed<nll_loss2d_forward_output::schema>();
}

// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_output::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {

    static auto op = create_nll_loss2d_forward_output_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index, output, total_weight);
}

// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {

    static auto op = create_nll_loss2d_forward_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_forward, name, "aten::nll_loss2d_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_forward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_forward, schema_str, "nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)")

// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_forward::schema> create_nll_loss2d_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_forward::name, nll_loss2d_forward::overload_name)
      .typed<nll_loss2d_forward::schema>();
}

// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward::call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {

    static auto op = create_nll_loss2d_forward_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {

    static auto op = create_nll_loss2d_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}
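
// Illustrative sketch (editor's note, not generated output): the forward
// variant returns both the loss and the accumulated weight, which the
// backward op below consumes as total_weight:
//
//   #include <ATen/ATen.h>
//   #include <ATen/core/Reduction.h>
//
//   at::Tensor logp   = at::randn({2, 4, 5, 5}).log_softmax(1);   // N,C,H,W
//   at::Tensor target = at::randint(0, 4, {2, 5, 5}, at::kLong);  // N,H,W
//   auto [output, total_weight] = at::nll_loss2d_forward(
//       logp, target, /*weight=*/{}, at::Reduction::Mean, /*ignore_index=*/-100);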
9748
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_backward_grad_input, name, "aten::nll_loss2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_backward_grad_input, schema_str, "nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_backward_grad_input::schema> create_nll_loss2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_backward_grad_input::name, nll_loss2d_backward_grad_input::overload_name)
      .typed<nll_loss2d_backward_grad_input::schema>();
}

// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & nll_loss2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
    static auto op = create_nll_loss2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}

// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & nll_loss2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
    static auto op = create_nll_loss2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_backward, name, "aten::nll_loss2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(nll_loss2d_backward, schema_str, "nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor")

// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_backward::schema> create_nll_loss2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_backward::name, nll_loss2d_backward::overload_name)
      .typed<nll_loss2d_backward::schema>();
}

// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
at::Tensor nll_loss2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
    static auto op = create_nll_loss2d_backward_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}

// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
at::Tensor nll_loss2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
    static auto op = create_nll_loss2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(soft_margin_loss_out, name, "aten::soft_margin_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(soft_margin_loss_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(soft_margin_loss_out, schema_str, "soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)")

// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<soft_margin_loss_out::schema> create_soft_margin_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(soft_margin_loss_out::name, soft_margin_loss_out::overload_name)
      .typed<soft_margin_loss_out::schema>();
}

// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & soft_margin_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    static auto op = create_soft_margin_loss_out_typed_handle();
    return op.call(self, target, reduction, out);
}

// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & soft_margin_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    static auto op = create_soft_margin_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(soft_margin_loss, name, "aten::soft_margin_loss")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(soft_margin_loss, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(soft_margin_loss, schema_str, "soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor")

// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<soft_margin_loss::schema> create_soft_margin_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(soft_margin_loss::name, soft_margin_loss::overload_name)
      .typed<soft_margin_loss::schema>();
}

// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor soft_margin_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    static auto op = create_soft_margin_loss_typed_handle();
    return op.call(self, target, reduction);
}

// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor soft_margin_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    static auto op = create_soft_margin_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction);
}

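// Usage sketch (illustrative only; hypothetical tensors). The out= overload
// above writes into a caller-provided tensor; the functional one allocates:
//   at::Tensor x = at::randn({8});
//   at::Tensor y = at::sign(at::randn({8}));    // targets in {-1, +1}
//   at::Tensor loss = soft_margin_loss::call(x, y, at::Reduction::Mean);
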
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_out, name, "aten::glu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_out, schema_str, "glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<glu_out::schema> create_glu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_out::name, glu_out::overload_name)
      .typed<glu_out::schema>();
}

// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {
    static auto op = create_glu_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
    static auto op = create_glu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu, name, "aten::glu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu, schema_str, "glu(Tensor self, int dim=-1) -> Tensor")

// aten::glu(Tensor self, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<glu::schema> create_glu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu::name, glu::overload_name)
      .typed<glu::schema>();
}

// aten::glu(Tensor self, int dim=-1) -> Tensor
at::Tensor glu::call(const at::Tensor & self, int64_t dim) {
    static auto op = create_glu_typed_handle();
    return op.call(self, dim);
}

// aten::glu(Tensor self, int dim=-1) -> Tensor
at::Tensor glu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    static auto op = create_glu_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

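// Usage sketch (illustrative only). glu halves `dim` and computes
// first_half * sigmoid(second_half), so the size of `dim` must be even:
//   at::Tensor t = at::randn({2, 6});
//   at::Tensor g = glu::call(t, /*dim=*/1);     // result shape: {2, 3}
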
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_jvp, name, "aten::glu_backward_jvp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_jvp, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_jvp, schema_str, "glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor")

// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<glu_backward_jvp::schema> create_glu_backward_jvp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_backward_jvp::name, glu_backward_jvp::overload_name)
      .typed<glu_backward_jvp::schema>();
}

// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
at::Tensor glu_backward_jvp::call(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
    static auto op = create_glu_backward_jvp_typed_handle();
    return op.call(grad_x, grad_glu, x, dgrad_glu, dx, dim);
}

// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
at::Tensor glu_backward_jvp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
    static auto op = create_glu_backward_jvp_typed_handle();
    return op.redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_out, name, "aten::hardtanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_out, schema_str, "hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh_out::schema> create_hardtanh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh_out::name, hardtanh_out::overload_name)
      .typed<hardtanh_out::schema>();
}

// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardtanh_out::call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
    static auto op = create_hardtanh_out_typed_handle();
    return op.call(self, min_val, max_val, out);
}

// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardtanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
    static auto op = create_hardtanh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, min_val, max_val, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh, name, "aten::hardtanh")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh, schema_str, "hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor")

// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh::schema> create_hardtanh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh::name, hardtanh::overload_name)
      .typed<hardtanh::schema>();
}

// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
at::Tensor hardtanh::call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    static auto op = create_hardtanh_typed_handle();
    return op.call(self, min_val, max_val);
}

// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
at::Tensor hardtanh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    static auto op = create_hardtanh_typed_handle();
    return op.redispatch(dispatchKeySet, self, min_val, max_val);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_, name, "aten::hardtanh_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardtanh_, schema_str, "hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)")

// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh_::schema> create_hardtanh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh_::name, hardtanh_::overload_name)
      .typed<hardtanh_::schema>();
}

// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
at::Tensor & hardtanh_::call(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    static auto op = create_hardtanh__typed_handle();
    return op.call(self, min_val, max_val);
}

// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
at::Tensor & hardtanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    static auto op = create_hardtanh__typed_handle();
    return op.redispatch(dispatchKeySet, self, min_val, max_val);
}

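// Usage sketch (illustrative only). The three wrappers above cover the out=,
// functional, and in-place variants; e.g. clamping to [-1, 1]:
//   at::Tensor t = at::randn({4});
//   at::Tensor r = hardtanh::call(t, /*min_val=*/-1, /*max_val=*/1);
//   hardtanh_::call(t, -1, 1);                  // mutates t in place
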
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardswish_backward, name, "aten::hardswish_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardswish_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardswish_backward, schema_str, "hardswish_backward(Tensor grad_output, Tensor self) -> Tensor")

// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hardswish_backward::schema> create_hardswish_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardswish_backward::name, hardswish_backward::overload_name)
      .typed<hardswish_backward::schema>();
}

// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor hardswish_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    static auto op = create_hardswish_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor hardswish_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    static auto op = create_hardswish_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_out, name, "aten::leaky_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_out, schema_str, "leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)")

// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu_out::schema> create_leaky_relu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu_out::name, leaky_relu_out::overload_name)
      .typed<leaky_relu_out::schema>();
}

// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & leaky_relu_out::call(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
    static auto op = create_leaky_relu_out_typed_handle();
    return op.call(self, negative_slope, out);
}

// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & leaky_relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
    static auto op = create_leaky_relu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, negative_slope, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu, name, "aten::leaky_relu")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu, schema_str, "leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor")

// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu::schema> create_leaky_relu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu::name, leaky_relu::overload_name)
      .typed<leaky_relu::schema>();
}

// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
at::Tensor leaky_relu::call(const at::Tensor & self, const at::Scalar & negative_slope) {
    static auto op = create_leaky_relu_typed_handle();
    return op.call(self, negative_slope);
}

// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
at::Tensor leaky_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope) {
    static auto op = create_leaky_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self, negative_slope);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_, name, "aten::leaky_relu_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(leaky_relu_, schema_str, "leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)")

// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu_::schema> create_leaky_relu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu_::name, leaky_relu_::overload_name)
      .typed<leaky_relu_::schema>();
}

// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
at::Tensor & leaky_relu_::call(at::Tensor & self, const at::Scalar & negative_slope) {
    static auto op = create_leaky_relu__typed_handle();
    return op.call(self, negative_slope);
}

// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
at::Tensor & leaky_relu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & negative_slope) {
    static auto op = create_leaky_relu__typed_handle();
    return op.redispatch(dispatchKeySet, self, negative_slope);
}

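// Usage sketch (illustrative only). leaky_relu keeps positive values and
// scales negatives by `negative_slope` (default 0.01):
//   at::Tensor t = at::randn({4});
//   at::Tensor r = leaky_relu::call(t, /*negative_slope=*/0.01);
//   leaky_relu_::call(t, 0.01);                 // in-place variant
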
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_forward_output, name, "aten::log_sigmoid_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_forward_output, overload_name, "output")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_forward_output, schema_str, "log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))")

// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_forward_output::schema> create_log_sigmoid_forward_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_forward_output::name, log_sigmoid_forward_output::overload_name)
      .typed<log_sigmoid_forward_output::schema>();
}

// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_output::call(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
    static auto op = create_log_sigmoid_forward_output_typed_handle();
    return op.call(self, output, buffer);
}

// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
    static auto op = create_log_sigmoid_forward_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, output, buffer);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_forward, name, "aten::log_sigmoid_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_forward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_forward, schema_str, "log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)")

// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_forward::schema> create_log_sigmoid_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_forward::name, log_sigmoid_forward::overload_name)
      .typed<log_sigmoid_forward::schema>();
}

// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward::call(const at::Tensor & self) {
    static auto op = create_log_sigmoid_forward_typed_handle();
    return op.call(self);
}

// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto op = create_log_sigmoid_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

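// Usage sketch (illustrative only). The forward returns the result plus a
// `buffer` tensor that the matching backward below consumes:
//   at::Tensor x = at::randn({4});
//   auto [out, buffer] = log_sigmoid_forward::call(x);
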
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_backward_grad_input, name, "aten::log_sigmoid_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_backward_grad_input, schema_str, "log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_backward_grad_input::schema> create_log_sigmoid_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_backward_grad_input::name, log_sigmoid_backward_grad_input::overload_name)
      .typed<log_sigmoid_backward_grad_input::schema>();
}

// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & log_sigmoid_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
    static auto op = create_log_sigmoid_backward_grad_input_typed_handle();
    return op.call(grad_output, self, buffer, grad_input);
}

// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & log_sigmoid_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
    static auto op = create_log_sigmoid_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, buffer, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_backward, name, "aten::log_sigmoid_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_sigmoid_backward, schema_str, "log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor")

// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_backward::schema> create_log_sigmoid_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_backward::name, log_sigmoid_backward::overload_name)
      .typed<log_sigmoid_backward::schema>();
}

// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
at::Tensor log_sigmoid_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
    static auto op = create_log_sigmoid_backward_typed_handle();
    return op.call(grad_output, self, buffer);
}

// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
at::Tensor log_sigmoid_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
    static auto op = create_log_sigmoid_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, buffer);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softshrink_out, name, "aten::softshrink")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softshrink_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softshrink_out, schema_str, "softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)")

// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<softshrink_out::schema> create_softshrink_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softshrink_out::name, softshrink_out::overload_name)
      .typed<softshrink_out::schema>();
}

// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softshrink_out::call(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
    static auto op = create_softshrink_out_typed_handle();
    return op.call(self, lambd, out);
}

// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softshrink_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
    static auto op = create_softshrink_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softshrink, name, "aten::softshrink")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softshrink, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(softshrink, schema_str, "softshrink(Tensor self, Scalar lambd=0.5) -> Tensor")

// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<softshrink::schema> create_softshrink_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softshrink::name, softshrink::overload_name)
      .typed<softshrink::schema>();
}

// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
at::Tensor softshrink::call(const at::Tensor & self, const at::Scalar & lambd) {
    static auto op = create_softshrink_typed_handle();
    return op.call(self, lambd);
}

// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
at::Tensor softshrink::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd) {
    static auto op = create_softshrink_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd);
}

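// Usage sketch (illustrative only). softshrink zeroes values in [-lambd, lambd]
// and shrinks the rest toward zero by lambd:
//   at::Tensor t = at::randn({4});
//   at::Tensor r = softshrink::call(t, /*lambd=*/0.5);
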
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d_backward_grad_input, name, "aten::adaptive_avg_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_avg_pool3d_backward_grad_input, schema_str, "adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool3d_backward_grad_input::schema> create_adaptive_avg_pool3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool3d_backward_grad_input::name, adaptive_avg_pool3d_backward_grad_input::overload_name)
      .typed<adaptive_avg_pool3d_backward_grad_input::schema>();
}

// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_avg_pool3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
    static auto op = create_adaptive_avg_pool3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, grad_input);
}

// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_avg_pool3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
    static auto op = create_adaptive_avg_pool3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_backward, name, "aten::_adaptive_avg_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_backward, schema_str, "_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor")

// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool3d_backward::schema> create__adaptive_avg_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool3d_backward::name, _adaptive_avg_pool3d_backward::overload_name)
      .typed<_adaptive_avg_pool3d_backward::schema>();
}

// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor _adaptive_avg_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    static auto op = create__adaptive_avg_pool3d_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor _adaptive_avg_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    static auto op = create__adaptive_avg_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_backward_grad_input, name, "aten::adaptive_max_pool2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_backward_grad_input, schema_str, "adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool2d_backward_grad_input::schema> create_adaptive_max_pool2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool2d_backward_grad_input::name, adaptive_max_pool2d_backward_grad_input::overload_name)
      .typed<adaptive_max_pool2d_backward_grad_input::schema>();
}

// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_max_pool2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    static auto op = create_adaptive_max_pool2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, indices, grad_input);
}

// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_max_pool2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    static auto op = create_adaptive_max_pool2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, indices, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_backward, name, "aten::adaptive_max_pool2d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool2d_backward, schema_str, "adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor")

// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool2d_backward::schema> create_adaptive_max_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool2d_backward::name, adaptive_max_pool2d_backward::overload_name)
      .typed<adaptive_max_pool2d_backward::schema>();
}

// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
at::Tensor adaptive_max_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    static auto op = create_adaptive_max_pool2d_backward_typed_handle();
    return op.call(grad_output, self, indices);
}

// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
at::Tensor adaptive_max_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    static auto op = create_adaptive_max_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_backward_grad_input, name, "aten::adaptive_max_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_backward_grad_input, overload_name, "grad_input")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_backward_grad_input, schema_str, "adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)")

// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool3d_backward_grad_input::schema> create_adaptive_max_pool3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool3d_backward_grad_input::name, adaptive_max_pool3d_backward_grad_input::overload_name)
      .typed<adaptive_max_pool3d_backward_grad_input::schema>();
}

// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_max_pool3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    static auto op = create_adaptive_max_pool3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, indices, grad_input);
}

// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_max_pool3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    static auto op = create_adaptive_max_pool3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, indices, grad_input);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_backward, name, "aten::adaptive_max_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(adaptive_max_pool3d_backward, schema_str, "adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor")

// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool3d_backward::schema> create_adaptive_max_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool3d_backward::name, adaptive_max_pool3d_backward::overload_name)
      .typed<adaptive_max_pool3d_backward::schema>();
}

// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
at::Tensor adaptive_max_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    static auto op = create_adaptive_max_pool3d_backward_typed_handle();
    return op.call(grad_output, self, indices);
}

// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
at::Tensor adaptive_max_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    static auto op = create_adaptive_max_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, indices);
}

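// Usage sketch (illustrative only; hypothetical tensors). The backward ops
// above take the `indices` produced by the corresponding forward op:
//   at::Tensor x = at::randn({1, 3, 8, 8, 8});
//   auto [out, indices] = at::adaptive_max_pool3d(x, {4, 4, 4});
//   at::Tensor gx = adaptive_max_pool3d_backward::call(at::ones_like(out), x, indices);
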
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_output, name, "aten::fractional_max_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_output, overload_name, "output")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d_output, schema_str, "fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")

// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool3d_output::schema> create_fractional_max_pool3d_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool3d_output::name, fractional_max_pool3d_output::overload_name)
      .typed<fractional_max_pool3d_output::schema>();
}

// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_output::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
    static auto op = create_fractional_max_pool3d_output_typed_handle();
    return op.call(self, kernel_size, output_size, random_samples, output, indices);
}

// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
    static auto op = create_fractional_max_pool3d_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d, name, "aten::fractional_max_pool3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fractional_max_pool3d, schema_str, "fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)")

// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool3d::schema> create_fractional_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool3d::name, fractional_max_pool3d::overload_name)
      .typed<fractional_max_pool3d::schema>();
}

// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
    static auto op = create_fractional_max_pool3d_typed_handle();
    return op.call(self, kernel_size, output_size, random_samples);
}

// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
    static auto op = create_fractional_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples);
}

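// Usage sketch (illustrative only; hypothetical shapes). `random_samples`
// holds per-(batch, channel) sampling coordinates in [0, 1), shaped {N, C, 3}
// for the three pooled dimensions:
//   at::Tensor x = at::randn({1, 2, 8, 8, 8});
//   at::Tensor samples = at::rand({1, 2, 3});
//   auto [out, indices] = fractional_max_pool3d::call(x, {2, 2, 2}, {4, 4, 4}, samples);
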
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_out, name, "aten::reflection_pad3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d_out, schema_str, "reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)")

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad3d_out::schema> create_reflection_pad3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad3d_out::name, reflection_pad3d_out::overload_name)
      .typed<reflection_pad3d_out::schema>();
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & reflection_pad3d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    static auto op = create_reflection_pad3d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & reflection_pad3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    static auto op = create_reflection_pad3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d, name, "aten::reflection_pad3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(reflection_pad3d, schema_str, "reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor")

// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad3d::schema> create_reflection_pad3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad3d::name, reflection_pad3d::overload_name)
      .typed<reflection_pad3d::schema>();
}

// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
at::Tensor reflection_pad3d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto op = create_reflection_pad3d_typed_handle();
    return op.call(self, padding);
}

// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
at::Tensor reflection_pad3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto op = create_reflection_pad3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

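// Usage sketch (illustrative only). The six padding values are
// (left, right, top, bottom, front, back), and reflection padding requires
// each value to be smaller than the corresponding input dimension. Via the
// int-based convenience wrapper declared in the generated headers:
//   at::Tensor x = at::randn({1, 1, 4, 4, 4});
//   at::Tensor p = at::reflection_pad3d(x, {1, 1, 1, 1, 1, 1});  // -> {1, 1, 6, 6, 6}
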
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad1d_out, name, "aten::replication_pad1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad1d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad1d_out, schema_str, "replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)")

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad1d_out::schema> create_replication_pad1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad1d_out::name, replication_pad1d_out::overload_name)
      .typed<replication_pad1d_out::schema>();
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad1d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    static auto op = create_replication_pad1d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    static auto op = create_replication_pad1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad1d, name, "aten::replication_pad1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad1d, schema_str, "replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor")

// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad1d::schema> create_replication_pad1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad1d::name, replication_pad1d::overload_name)
      .typed<replication_pad1d::schema>();
}

// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
at::Tensor replication_pad1d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto op = create_replication_pad1d_typed_handle();
    return op.call(self, padding);
}

// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
at::Tensor replication_pad1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto op = create_replication_pad1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_out, name, "aten::replication_pad2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d_out, schema_str, "replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)")

// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad2d_out::schema> create_replication_pad2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad2d_out::name, replication_pad2d_out::overload_name)
      .typed<replication_pad2d_out::schema>();
}

// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad2d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    static auto op = create_replication_pad2d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    static auto op = create_replication_pad2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d, name, "aten::replication_pad2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(replication_pad2d, schema_str, "replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor")

// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad2d::schema> create_replication_pad2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad2d::name, replication_pad2d::overload_name)
      .typed<replication_pad2d::schema>();
}

// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
at::Tensor replication_pad2d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto op = create_replication_pad2d_typed_handle();
    return op.call(self, padding);
}

// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
at::Tensor replication_pad2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto op = create_replication_pad2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_circular, name, "aten::_pad_circular")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_circular, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_pad_circular, schema_str, "_pad_circular(Tensor self, SymInt[] pad) -> Tensor")

// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pad_circular::schema> create__pad_circular_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pad_circular::name, _pad_circular::overload_name)
      .typed<_pad_circular::schema>();
}

// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
at::Tensor _pad_circular::call(const at::Tensor & self, c10::SymIntArrayRef pad) {
    static auto op = create__pad_circular_typed_handle();
    return op.call(self, pad);
}

// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
at::Tensor _pad_circular::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad) {
    static auto op = create__pad_circular_typed_handle();
    return op.redispatch(dispatchKeySet, self, pad);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pad, name, "aten::pad")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pad, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(pad, schema_str, "pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor")

// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pad::schema> create_pad_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pad::name, pad::overload_name)
      .typed<pad::schema>();
}

// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
at::Tensor pad::call(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
    static auto op = create_pad_typed_handle();
    return op.call(self, pad, mode, value);
}

// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
at::Tensor pad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
    static auto op = create_pad_typed_handle();
    return op.redispatch(dispatchKeySet, self, pad, mode, value);
}

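// Usage sketch (illustrative only). aten::pad routes to constant, reflect,
// replicate, or circular padding based on `mode`; via the int-based
// convenience wrapper declared in the generated headers:
//   at::Tensor x = at::randn({1, 1, 4});
//   at::Tensor p = at::pad(x, {2, 2}, "constant", 0.0);   // -> {1, 1, 8}
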
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_vec, name, "aten::upsample_nearest1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_vec, overload_name, "vec")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_vec, schema_str, "upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")

// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d_vec::schema> create_upsample_nearest1d_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d_vec::name, upsample_nearest1d_vec::overload_name)
      .typed<upsample_nearest1d_vec::schema>();
}

// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest1d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create_upsample_nearest1d_vec_typed_handle();
    return op.call(input, output_size, scale_factors);
}

// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest1d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create_upsample_nearest1d_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d_vec, name, "aten::_upsample_nearest_exact1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d_vec, overload_name, "vec")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d_vec, schema_str, "_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")

// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d_vec::schema> create__upsample_nearest_exact1d_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d_vec::name, _upsample_nearest_exact1d_vec::overload_name)
      .typed<_upsample_nearest_exact1d_vec::schema>();
}

// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact1d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create__upsample_nearest_exact1d_vec_typed_handle();
    return op.call(input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact1d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {

    static auto op = create__upsample_nearest_exact1d_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

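// Illustrative note (not generated): the .vec overloads above take either an
// explicit output_size or per-dimension scale_factors, with the unused
// argument left empty. A sketch (assuming <ATen/ATen.h>):
//
//   at::Tensor x = at::rand({1, 2, 8});
//   std::vector<double> scales = {2.0};
//   at::Tensor y = at::upsample_nearest1d(
//       x, /*output_size=*/c10::nullopt,
//       /*scale_factors=*/c10::optional<at::ArrayRef<double>>(scales));
//   // y.sizes() == {1, 2, 16}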
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_out, name, "aten::upsample_nearest1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d_out, schema_str, "upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d_out::schema> create_upsample_nearest1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d_out::name, upsample_nearest1d_out::overload_name)
      .typed<upsample_nearest1d_out::schema>();
}

// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_nearest1d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {

    static auto op = create_upsample_nearest1d_out_typed_handle();
    return op.call(self, output_size, scales, out);
}

// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_nearest1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {

    static auto op = create_upsample_nearest1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d_out, name, "aten::_upsample_nearest_exact1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d_out, schema_str, "_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d_out::schema> create__upsample_nearest_exact1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d_out::name, _upsample_nearest_exact1d_out::overload_name)
      .typed<_upsample_nearest_exact1d_out::schema>();
}

// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact1d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {

    static auto op = create__upsample_nearest_exact1d_out_typed_handle();
    return op.call(self, output_size, scales, out);
}

// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {

    static auto op = create__upsample_nearest_exact1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales, out);
}

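// Illustrative note (not generated): the .out variants above write into a
// caller-provided tensor and return a reference to it, matching the (a!)
// alias annotation in the schema. Sketch (assuming <ATen/ATen.h>):
//
//   at::Tensor x = at::rand({1, 2, 8});
//   at::Tensor out = at::empty({1, 2, 16}, x.options());
//   at::upsample_nearest1d_out(out, x, {16});  // fills `out` in place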
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d, name, "aten::upsample_nearest1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(upsample_nearest1d, schema_str, "upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor")

// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d::schema> create_upsample_nearest1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d::name, upsample_nearest1d::overload_name)
      .typed<upsample_nearest1d::schema>();
}

// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
at::Tensor upsample_nearest1d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {

    static auto op = create_upsample_nearest1d_typed_handle();
    return op.call(self, output_size, scales);
}

// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
at::Tensor upsample_nearest1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {

    static auto op = create_upsample_nearest1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d, name, "aten::_upsample_nearest_exact1d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_upsample_nearest_exact1d, schema_str, "_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor")

// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d::schema> create__upsample_nearest_exact1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d::name, _upsample_nearest_exact1d::overload_name)
      .typed<_upsample_nearest_exact1d::schema>();
}

// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
at::Tensor _upsample_nearest_exact1d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {

    static auto op = create__upsample_nearest_exact1d_typed_handle();
    return op.call(self, output_size, scales);
}

// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
at::Tensor _upsample_nearest_exact1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {

    static auto op = create__upsample_nearest_exact1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conv_depthwise2d_out, name, "aten::_conv_depthwise2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conv_depthwise2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conv_depthwise2d_out, schema_str, "_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_conv_depthwise2d_out::schema> create__conv_depthwise2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conv_depthwise2d_out::name, _conv_depthwise2d_out::overload_name)
      .typed<_conv_depthwise2d_out::schema>();
}

// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & _conv_depthwise2d_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) {

    static auto op = create__conv_depthwise2d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & _conv_depthwise2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) {

    static auto op = create__conv_depthwise2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
}

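// Illustrative note (not generated): _conv_depthwise2d.out above passes and
// returns `const at::Tensor &` for `out`. at::Tensor is a reference-counted
// handle, and its in-place methods are const-qualified, so a const handle
// does not mean const data: the kernel still writes through `out`. Some
// generated out= overloads use this const-ref form while others (e.g.
// slow_conv3d.out below) use a mutable `at::Tensor &`.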
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conv_depthwise2d, name, "aten::_conv_depthwise2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conv_depthwise2d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conv_depthwise2d, schema_str, "_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor")

// aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_conv_depthwise2d::schema> create__conv_depthwise2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conv_depthwise2d::name, _conv_depthwise2d::overload_name)
      .typed<_conv_depthwise2d::schema>();
}

// aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
at::Tensor _conv_depthwise2d::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {

    static auto op = create__conv_depthwise2d_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
at::Tensor _conv_depthwise2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {

    static auto op = create__conv_depthwise2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_out, name, "aten::slow_conv3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d_out, schema_str, "slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv3d_out::schema> create_slow_conv3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv3d_out::name, slow_conv3d_out::overload_name)
      .typed<slow_conv3d_out::schema>();
}

// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv3d_out::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {

    static auto op = create_slow_conv3d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, out);
}

// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {

    static auto op = create_slow_conv3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d, name, "aten::slow_conv3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slow_conv3d, schema_str, "slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor")

// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv3d::schema> create_slow_conv3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv3d::name, slow_conv3d::overload_name)
      .typed<slow_conv3d::schema>();
}

// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
at::Tensor slow_conv3d::call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {

    static auto op = create_slow_conv3d_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding);
}

// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
at::Tensor slow_conv3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {

    static auto op = create_slow_conv3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
}

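// Illustrative note (not generated): slow_conv3d above defaults bias, stride,
// and padding in the schema, so the public wrapper can be called with just
// the tensors and a kernel size (assuming <ATen/ATen.h>):
//
//   at::Tensor input  = at::rand({1, 4, 8, 8, 8});
//   at::Tensor weight = at::rand({8, 4, 3, 3, 3});
//   at::Tensor y = at::slow_conv3d(input, weight, {3, 3, 3});
//   // y.sizes() == {1, 8, 6, 6, 6}  (stride 1, no padding)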
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_remove_batch_dim, name, "aten::_remove_batch_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_remove_batch_dim, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_remove_batch_dim, schema_str, "_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor")

// aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_remove_batch_dim::schema> create__remove_batch_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_remove_batch_dim::name, _remove_batch_dim::overload_name)
      .typed<_remove_batch_dim::schema>();
}

// aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
at::Tensor _remove_batch_dim::call(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {

    static auto op = create__remove_batch_dim_typed_handle();
    return op.call(self, level, batch_size, out_dim);
}

// aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
at::Tensor _remove_batch_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {

    static auto op = create__remove_batch_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, level, batch_size, out_dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_ndtr, name, "aten::special_log_ndtr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_ndtr, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_ndtr, schema_str, "special_log_ndtr(Tensor self) -> Tensor")

// aten::special_log_ndtr(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_log_ndtr::schema> create_special_log_ndtr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_log_ndtr::name, special_log_ndtr::overload_name)
      .typed<special_log_ndtr::schema>();
}

// aten::special_log_ndtr(Tensor self) -> Tensor
at::Tensor special_log_ndtr::call(const at::Tensor & self) {

    static auto op = create_special_log_ndtr_typed_handle();
    return op.call(self);
}

// aten::special_log_ndtr(Tensor self) -> Tensor
at::Tensor special_log_ndtr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_log_ndtr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_ndtr_out, name, "aten::special_log_ndtr")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_ndtr_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_log_ndtr_out, schema_str, "special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_log_ndtr_out::schema> create_special_log_ndtr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_log_ndtr_out::name, special_log_ndtr_out::overload_name)
      .typed<special_log_ndtr_out::schema>();
}

// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_log_ndtr_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_log_ndtr_out_typed_handle();
    return op.call(self, out);
}

// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_log_ndtr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_log_ndtr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erf, name, "aten::special_erf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erf, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erf, schema_str, "special_erf(Tensor self) -> Tensor")

// aten::special_erf(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_erf::schema> create_special_erf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erf::name, special_erf::overload_name)
      .typed<special_erf::schema>();
}

// aten::special_erf(Tensor self) -> Tensor
at::Tensor special_erf::call(const at::Tensor & self) {

    static auto op = create_special_erf_typed_handle();
    return op.call(self);
}

// aten::special_erf(Tensor self) -> Tensor
at::Tensor special_erf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_erf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erf_out, name, "aten::special_erf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erf_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_erf_out, schema_str, "special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_erf_out::schema> create_special_erf_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erf_out::name, special_erf_out::overload_name)
      .typed<special_erf_out::schema>();
}

// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erf_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_erf_out_typed_handle();
    return op.call(self, out);
}

// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_erf_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy, name, "aten::special_xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy, schema_str, "special_xlogy(Tensor self, Tensor other) -> Tensor")

// aten::special_xlogy(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy::schema> create_special_xlogy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlogy::name, special_xlogy::overload_name)
      .typed<special_xlogy::schema>();
}

// aten::special_xlogy(Tensor self, Tensor other) -> Tensor
at::Tensor special_xlogy::call(const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_special_xlogy_typed_handle();
    return op.call(self, other);
}

// aten::special_xlogy(Tensor self, Tensor other) -> Tensor
at::Tensor special_xlogy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {

    static auto op = create_special_xlogy_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_self_scalar, name, "aten::special_xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_self_scalar, overload_name, "self_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_self_scalar, schema_str, "special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor")

// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_self_scalar::schema> create_special_xlogy_self_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlogy_self_scalar::name, special_xlogy_self_scalar::overload_name)
      .typed<special_xlogy_self_scalar::schema>();
}

// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_xlogy_self_scalar::call(const at::Scalar & self, const at::Tensor & other) {

    static auto op = create_special_xlogy_self_scalar_typed_handle();
    return op.call(self, other);
}

// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_xlogy_self_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {

    static auto op = create_special_xlogy_self_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_other_scalar, name, "aten::special_xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_other_scalar, overload_name, "other_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_other_scalar, schema_str, "special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor")

// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_other_scalar::schema> create_special_xlogy_other_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlogy_other_scalar::name, special_xlogy_other_scalar::overload_name)
      .typed<special_xlogy_other_scalar::schema>();
}

// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_xlogy_other_scalar::call(const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_special_xlogy_other_scalar_typed_handle();
    return op.call(self, other);
}

// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_xlogy_other_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {

    static auto op = create_special_xlogy_other_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_out, name, "aten::special_xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_out, schema_str, "special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_out::schema> create_special_xlogy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlogy_out::name, special_xlogy_out::overload_name)
      .typed<special_xlogy_out::schema>();
}

// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_xlogy_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_xlogy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_self_scalar_out, name, "aten::special_xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_self_scalar_out, overload_name, "self_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_self_scalar_out, schema_str, "special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_self_scalar_out::schema> create_special_xlogy_self_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlogy_self_scalar_out::name, special_xlogy_self_scalar_out::overload_name)
      .typed<special_xlogy_self_scalar_out::schema>();
}

// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_self_scalar_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_xlogy_self_scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_self_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {

    static auto op = create_special_xlogy_self_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_other_scalar_out, name, "aten::special_xlogy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_other_scalar_out, overload_name, "other_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_xlogy_other_scalar_out, schema_str, "special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_other_scalar_out::schema> create_special_xlogy_other_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlogy_other_scalar_out::name, special_xlogy_other_scalar_out::overload_name)
      .typed<special_xlogy_other_scalar_out::schema>();
}

// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_other_scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_special_xlogy_other_scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_other_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_special_xlogy_other_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

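// Illustrative note (not generated): the special_xlogy family above shows how
// torchgen disambiguates Scalar/Tensor variants. All six stubs register under
// the same schema name "aten::special_xlogy"; only the overload_name
// ("self_scalar", "other_scalar", "out", ...) differs, and it is appended to
// the C++ stub name. On the caller side, ordinary overload resolution picks
// the variant:
//
//   at::Tensor x = at::rand({4});
//   at::Tensor y = at::special_xlogy(x, 2.0);  // resolves the .other_scalar overload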
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expit, name, "aten::special_expit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expit, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expit, schema_str, "special_expit(Tensor self) -> Tensor")

// aten::special_expit(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_expit::schema> create_special_expit_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_expit::name, special_expit::overload_name)
      .typed<special_expit::schema>();
}

// aten::special_expit(Tensor self) -> Tensor
at::Tensor special_expit::call(const at::Tensor & self) {

    static auto op = create_special_expit_typed_handle();
    return op.call(self);
}

// aten::special_expit(Tensor self) -> Tensor
at::Tensor special_expit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_expit_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expit_out, name, "aten::special_expit")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expit_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_expit_out, schema_str, "special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_expit_out::schema> create_special_expit_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_expit_out::name, special_expit_out::overload_name)
      .typed<special_expit_out::schema>();
}

// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_expit_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_expit_out_typed_handle();
    return op.call(self, out);
}

// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_expit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_expit_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_sinc, name, "aten::special_sinc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_sinc, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_sinc, schema_str, "special_sinc(Tensor self) -> Tensor")

// aten::special_sinc(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_sinc::schema> create_special_sinc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_sinc::name, special_sinc::overload_name)
      .typed<special_sinc::schema>();
}

// aten::special_sinc(Tensor self) -> Tensor
at::Tensor special_sinc::call(const at::Tensor & self) {

    static auto op = create_special_sinc_typed_handle();
    return op.call(self);
}

// aten::special_sinc(Tensor self) -> Tensor
at::Tensor special_sinc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_sinc_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_sinc_out, name, "aten::special_sinc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_sinc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_sinc_out, schema_str, "special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_sinc_out::schema> create_special_sinc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_sinc_out::name, special_sinc_out::overload_name)
      .typed<special_sinc_out::schema>();
}

// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_sinc_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_sinc_out_typed_handle();
    return op.call(self, out);
}

// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_sinc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_sinc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_softmax, name, "aten::special_softmax")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_softmax, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_softmax, schema_str, "special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor")

// aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_softmax::schema> create_special_softmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_softmax::name, special_softmax::overload_name)
      .typed<special_softmax::schema>();
}

// aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor special_softmax::call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_special_softmax_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor special_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {

    static auto op = create_special_softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft, name, "aten::fft_fft")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft, schema_str, "fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor")

// aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_fft::schema> create_fft_fft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_fft::name, fft_fft::overload_name)
      .typed<fft_fft::schema>();
}

// aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_fft::call(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_fft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_fft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {

    static auto op = create_fft_fft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft_out, name, "aten::fft_fft")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fft_out, schema_str, "fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_fft_out::schema> create_fft_fft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_fft_out::name, fft_fft_out::overload_name)
      .typed<fft_fft_out::schema>();
}

// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fft_out::call(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_fft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {

    static auto op = create_fft_fft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

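// Illustrative note (not generated): fft_fft above takes an optional signal
// length n, a transform dim, and an optional norm string ("forward",
// "backward", or "ortho"). Sketch (assuming <ATen/ATen.h>):
//
//   at::Tensor x = at::rand({8});
//   at::Tensor X = at::fft_fft(x, /*n=*/c10::nullopt, /*dim=*/-1,
//                              /*norm=*/"ortho");  // complex-valued result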
11349STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft, name, "aten::fft_rfft")
11350STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft, overload_name, "")
11351STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft, schema_str, "fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor")
11352
11353// aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
11354static C10_NOINLINE c10::TypedOperatorHandle<fft_rfft::schema> create_fft_rfft_typed_handle() {
11355 return c10::Dispatcher::singleton()
11356 .findSchemaOrThrow(fft_rfft::name, fft_rfft::overload_name)
11357 .typed<fft_rfft::schema>();
11358}
11359
11360// aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
11361at::Tensor fft_rfft::call(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
11362
11363 static auto op = create_fft_rfft_typed_handle();
11364 return op.call(self, n, dim, norm);
11365}
11366
11367// aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
11368at::Tensor fft_rfft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
11369
11370 static auto op = create_fft_rfft_typed_handle();
11371 return op.redispatch(dispatchKeySet, self, n, dim, norm);
11372}
11373
11374STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft_out, name, "aten::fft_rfft")
11375STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft_out, overload_name, "out")
11376STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfft_out, schema_str, "fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
11377
11378// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11379static C10_NOINLINE c10::TypedOperatorHandle<fft_rfft_out::schema> create_fft_rfft_out_typed_handle() {
11380 return c10::Dispatcher::singleton()
11381 .findSchemaOrThrow(fft_rfft_out::name, fft_rfft_out::overload_name)
11382 .typed<fft_rfft_out::schema>();
11383}
11384
11385// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11386at::Tensor & fft_rfft_out::call(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
11387
11388 static auto op = create_fft_rfft_out_typed_handle();
11389 return op.call(self, n, dim, norm, out);
11390}
11391
11392// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11393at::Tensor & fft_rfft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
11394
11395 static auto op = create_fft_rfft_out_typed_handle();
11396 return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
11397}
11398
11399STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfft2, name, "aten::fft_hfft2")
11400STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfft2, overload_name, "")
11401STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfft2, schema_str, "fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor")
11402
11403// aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
11404static C10_NOINLINE c10::TypedOperatorHandle<fft_hfft2::schema> create_fft_hfft2_typed_handle() {
11405 return c10::Dispatcher::singleton()
11406 .findSchemaOrThrow(fft_hfft2::name, fft_hfft2::overload_name)
11407 .typed<fft_hfft2::schema>();
11408}
11409
11410// aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
11411at::Tensor fft_hfft2::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
11412
11413 static auto op = create_fft_hfft2_typed_handle();
11414 return op.call(self, s, dim, norm);
11415}
11416
11417// aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
11418at::Tensor fft_hfft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
11419
11420 static auto op = create_fft_hfft2_typed_handle();
11421 return op.redispatch(dispatchKeySet, self, s, dim, norm);
11422}
11423
11424STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfft2_out, name, "aten::fft_hfft2")
11425STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfft2_out, overload_name, "out")
11426STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_hfft2_out, schema_str, "fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
11427
11428// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11429static C10_NOINLINE c10::TypedOperatorHandle<fft_hfft2_out::schema> create_fft_hfft2_out_typed_handle() {
11430 return c10::Dispatcher::singleton()
11431 .findSchemaOrThrow(fft_hfft2_out::name, fft_hfft2_out::overload_name)
11432 .typed<fft_hfft2_out::schema>();
11433}
11434
11435// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11436const at::Tensor & fft_hfft2_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
11437
11438 static auto op = create_fft_hfft2_out_typed_handle();
11439 return op.call(self, s, dim, norm, out);
11440}
11441
11442// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11443const at::Tensor & fft_hfft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
11444
11445 static auto op = create_fft_hfft2_out_typed_handle();
11446 return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
11447}
11448
11449STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifftn, name, "aten::fft_ifftn")
11450STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifftn, overload_name, "")
11451STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifftn, schema_str, "fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor")
11452
11453// aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
11454static C10_NOINLINE c10::TypedOperatorHandle<fft_ifftn::schema> create_fft_ifftn_typed_handle() {
11455 return c10::Dispatcher::singleton()
11456 .findSchemaOrThrow(fft_ifftn::name, fft_ifftn::overload_name)
11457 .typed<fft_ifftn::schema>();
11458}
11459
11460// aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
11461at::Tensor fft_ifftn::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
11462
11463 static auto op = create_fft_ifftn_typed_handle();
11464 return op.call(self, s, dim, norm);
11465}
11466
11467// aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
11468at::Tensor fft_ifftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
11469
11470 static auto op = create_fft_ifftn_typed_handle();
11471 return op.redispatch(dispatchKeySet, self, s, dim, norm);
11472}
11473
11474STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifftn_out, name, "aten::fft_ifftn")
11475STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifftn_out, overload_name, "out")
11476STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ifftn_out, schema_str, "fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
11477
11478// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11479static C10_NOINLINE c10::TypedOperatorHandle<fft_ifftn_out::schema> create_fft_ifftn_out_typed_handle() {
11480 return c10::Dispatcher::singleton()
11481 .findSchemaOrThrow(fft_ifftn_out::name, fft_ifftn_out::overload_name)
11482 .typed<fft_ifftn_out::schema>();
11483}
11484
11485// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11486at::Tensor & fft_ifftn_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
11487
11488 static auto op = create_fft_ifftn_out_typed_handle();
11489 return op.call(self, s, dim, norm, out);
11490}
11491
11492// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11493at::Tensor & fft_ifftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
11494
11495 static auto op = create_fft_ifftn_out_typed_handle();
11496 return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
11497}
11498
11499STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ihfftn, name, "aten::fft_ihfftn")
11500STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ihfftn, overload_name, "")
11501STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ihfftn, schema_str, "fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor")
11502
11503// aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
11504static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfftn::schema> create_fft_ihfftn_typed_handle() {
11505 return c10::Dispatcher::singleton()
11506 .findSchemaOrThrow(fft_ihfftn::name, fft_ihfftn::overload_name)
11507 .typed<fft_ihfftn::schema>();
11508}
11509
11510// aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
11511at::Tensor fft_ihfftn::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
11512
11513 static auto op = create_fft_ihfftn_typed_handle();
11514 return op.call(self, s, dim, norm);
11515}
11516
11517// aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
11518at::Tensor fft_ihfftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
11519
11520 static auto op = create_fft_ihfftn_typed_handle();
11521 return op.redispatch(dispatchKeySet, self, s, dim, norm);
11522}
11523
11524STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ihfftn_out, name, "aten::fft_ihfftn")
11525STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ihfftn_out, overload_name, "out")
11526STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_ihfftn_out, schema_str, "fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
11527
11528// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11529static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfftn_out::schema> create_fft_ihfftn_out_typed_handle() {
11530 return c10::Dispatcher::singleton()
11531 .findSchemaOrThrow(fft_ihfftn_out::name, fft_ihfftn_out::overload_name)
11532 .typed<fft_ihfftn_out::schema>();
11533}
11534
11535// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11536const at::Tensor & fft_ihfftn_out::call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
11537
11538 static auto op = create_fft_ihfftn_out_typed_handle();
11539 return op.call(self, s, dim, norm, out);
11540}
11541
11542// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
11543const at::Tensor & fft_ihfftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
11544
11545 static auto op = create_fft_ihfftn_out_typed_handle();
11546 return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
11547}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftfreq, name, "aten::fft_fftfreq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftfreq, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftfreq, schema_str, "fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftfreq::schema> create_fft_fftfreq_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_fftfreq::name, fft_fftfreq::overload_name)
      .typed<fft_fftfreq::schema>();
}

// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor fft_fftfreq::call(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_fft_fftfreq_typed_handle();
    return op.call(n, d, dtype, layout, device, pin_memory);
}

// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor fft_fftfreq::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_fft_fftfreq_typed_handle();
    return op.redispatch(dispatchKeySet, n, d, dtype, layout, device, pin_memory);
}
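
// [Editor's note] Illustrative sketch (not generated): fft_fftfreq is a
// factory op, so its schema carries dtype/layout/device/pin_memory options
// instead of a `self` tensor.
[[maybe_unused]] static at::Tensor example_fft_fftfreq() {
  // Frequency bins for an 8-point FFT with sample spacing d = 0.5; all four
  // TensorOptions components are left at their defaults.
  return fft_fftfreq::call(8, 0.5, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
}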

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftfreq_out, name, "aten::fft_fftfreq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftfreq_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_fftfreq_out, schema_str, "fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftfreq_out::schema> create_fft_fftfreq_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_fftfreq_out::name, fft_fftfreq_out::overload_name)
      .typed<fft_fftfreq_out::schema>();
}

// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fftfreq_out::call(int64_t n, double d, at::Tensor & out) {

    static auto op = create_fft_fftfreq_out_typed_handle();
    return op.call(n, d, out);
}

// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fftfreq_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {

    static auto op = create_fft_fftfreq_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, d, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftfreq, name, "aten::fft_rfftfreq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftfreq, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftfreq, schema_str, "fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")

// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfftfreq::schema> create_fft_rfftfreq_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfftfreq::name, fft_rfftfreq::overload_name)
      .typed<fft_rfftfreq::schema>();
}

// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor fft_rfftfreq::call(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_fft_rfftfreq_typed_handle();
    return op.call(n, d, dtype, layout, device, pin_memory);
}

// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor fft_rfftfreq::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {

    static auto op = create_fft_rfftfreq_typed_handle();
    return op.redispatch(dispatchKeySet, n, d, dtype, layout, device, pin_memory);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftfreq_out, name, "aten::fft_rfftfreq")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftfreq_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(fft_rfftfreq_out, schema_str, "fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfftfreq_out::schema> create_fft_rfftfreq_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfftfreq_out::name, fft_rfftfreq_out::overload_name)
      .typed<fft_rfftfreq_out::schema>();
}

// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfftfreq_out::call(int64_t n, double d, at::Tensor & out) {

    static auto op = create_fft_rfftfreq_out_typed_handle();
    return op.call(n, d, out);
}

// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfftfreq_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {

    static auto op = create_fft_rfftfreq_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, d, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_ex, name, "aten::linalg_cholesky_ex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_ex, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_ex, schema_str, "linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)")

// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cholesky_ex::schema> create_linalg_cholesky_ex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cholesky_ex::name, linalg_cholesky_ex::overload_name)
      .typed<linalg_cholesky_ex::schema>();
}

// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex::call(const at::Tensor & self, bool upper, bool check_errors) {

    static auto op = create_linalg_cholesky_ex_typed_handle();
    return op.call(self, upper, check_errors);
}

// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors) {

    static auto op = create_linalg_cholesky_ex_typed_handle();
    return op.redispatch(dispatchKeySet, self, upper, check_errors);
}
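
// [Editor's note] Illustrative sketch (not generated): the _ex variants
// return an extra `info` tensor instead of throwing on failure when
// check_errors=false.
[[maybe_unused]] static void example_linalg_cholesky_ex(const at::Tensor & a) {
  // Factor a (batch of) positive-definite matrices; `info` is nonzero where
  // the factorization failed.
  auto [L, info] = linalg_cholesky_ex::call(a, /*upper=*/false, /*check_errors=*/false);
  (void)L; (void)info;
}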

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_ex_L, name, "aten::linalg_cholesky_ex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_ex_L, overload_name, "L")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cholesky_ex_L, schema_str, "linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)")

// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cholesky_ex_L::schema> create_linalg_cholesky_ex_L_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cholesky_ex_L::name, linalg_cholesky_ex_L::overload_name)
      .typed<linalg_cholesky_ex_L::schema>();
}

// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_L::call(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {

    static auto op = create_linalg_cholesky_ex_L_typed_handle();
    return op.call(self, upper, check_errors, L, info);
}

// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_L::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {

    static auto op = create_linalg_cholesky_ex_L_typed_handle();
    return op.redispatch(dispatchKeySet, self, upper, check_errors, L, info);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cross, name, "aten::linalg_cross")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cross, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cross, schema_str, "linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor")

// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cross::schema> create_linalg_cross_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cross::name, linalg_cross::overload_name)
      .typed<linalg_cross::schema>();
}

// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
at::Tensor linalg_cross::call(const at::Tensor & self, const at::Tensor & other, int64_t dim) {

    static auto op = create_linalg_cross_typed_handle();
    return op.call(self, other, dim);
}

// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
at::Tensor linalg_cross::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim) {

    static auto op = create_linalg_cross_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dim);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cross_out, name, "aten::linalg_cross")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cross_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cross_out, schema_str, "linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cross_out::schema> create_linalg_cross_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cross_out::name, linalg_cross_out::overload_name)
      .typed<linalg_cross_out::schema>();
}

// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cross_out::call(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {

    static auto op = create_linalg_cross_out_typed_handle();
    return op.call(self, other, dim, out);
}

// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cross_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {

    static auto op = create_linalg_cross_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dim, out);
}
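
// [Editor's note] Illustrative sketch (not generated): out-variants return a
// reference to the very tensor passed as `out`, matching the Tensor(a!)
// alias annotation in the schema.
[[maybe_unused]] static void example_linalg_cross_out(const at::Tensor & a, const at::Tensor & b, at::Tensor & out) {
  at::Tensor & result = linalg_cross_out::call(a, b, /*dim=*/-1, out);
  TORCH_INTERNAL_ASSERT(result.is_same(out));
}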

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_ex, name, "aten::linalg_lu_factor_ex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_ex, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_ex, schema_str, "linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)")

// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_factor_ex::schema> create_linalg_lu_factor_ex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_factor_ex::name, linalg_lu_factor_ex::overload_name)
      .typed<linalg_lu_factor_ex::schema>();
}

// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex::call(const at::Tensor & A, bool pivot, bool check_errors) {

    static auto op = create_linalg_lu_factor_ex_typed_handle();
    return op.call(A, pivot, check_errors);
}

// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors) {

    static auto op = create_linalg_lu_factor_ex_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot, check_errors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_ex_out, name, "aten::linalg_lu_factor_ex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_ex_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_lu_factor_ex_out, schema_str, "linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)")

// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_factor_ex_out::schema> create_linalg_lu_factor_ex_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_factor_ex_out::name, linalg_lu_factor_ex_out::overload_name)
      .typed<linalg_lu_factor_ex_out::schema>();
}

// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out::call(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {

    static auto op = create_linalg_lu_factor_ex_out_typed_handle();
    return op.call(A, pivot, check_errors, LU, pivots, info);
}

// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {

    static auto op = create_linalg_lu_factor_ex_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot, check_errors, LU, pivots, info);
}
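
// [Editor's note] Illustrative sketch (not generated): three-output ops bind
// naturally to structured bindings at the call site.
[[maybe_unused]] static void example_linalg_lu_factor_ex(const at::Tensor & A) {
  // Partial-pivoting LU; with check_errors=false, a singular input is
  // reported through `info` rather than by throwing.
  auto [LU, pivots, info] = linalg_lu_factor_ex::call(A, /*pivot=*/true, /*check_errors=*/false);
  (void)LU; (void)pivots; (void)info;
}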

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(det, name, "aten::det")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(det, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(det, schema_str, "det(Tensor self) -> Tensor")

// aten::det(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<det::schema> create_det_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(det::name, det::overload_name)
      .typed<det::schema>();
}

// aten::det(Tensor self) -> Tensor
at::Tensor det::call(const at::Tensor & self) {

    static auto op = create_det_typed_handle();
    return op.call(self);
}

// aten::det(Tensor self) -> Tensor
at::Tensor det::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_det_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inverse, name, "aten::inverse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inverse, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inverse, schema_str, "inverse(Tensor self) -> Tensor")

// aten::inverse(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<inverse::schema> create_inverse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(inverse::name, inverse::overload_name)
      .typed<inverse::schema>();
}

// aten::inverse(Tensor self) -> Tensor
at::Tensor inverse::call(const at::Tensor & self) {

    static auto op = create_inverse_typed_handle();
    return op.call(self);
}

// aten::inverse(Tensor self) -> Tensor
at::Tensor inverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_inverse_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inverse_out, name, "aten::inverse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inverse_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(inverse_out, schema_str, "inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<inverse_out::schema> create_inverse_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(inverse_out::name, inverse_out::overload_name)
      .typed<inverse_out::schema>();
}

// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & inverse_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_inverse_out_typed_handle();
    return op.call(self, out);
}

// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & inverse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_inverse_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}
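
// [Editor's note] Illustrative sketch (not generated): the redispatch entry
// point is for code already running inside the dispatcher that wants to
// continue below the dispatch keys recorded in `ks`, instead of starting
// dispatch over from the top.
[[maybe_unused]] static at::Tensor example_inverse_redispatch(c10::DispatchKeySet ks, const at::Tensor & self) {
  return inverse::redispatch(ks, self);
}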

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond, name, "aten::linalg_cond")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond, schema_str, "linalg_cond(Tensor self, Scalar? p=None) -> Tensor")

// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cond::schema> create_linalg_cond_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cond::name, linalg_cond::overload_name)
      .typed<linalg_cond::schema>();
}

// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
at::Tensor linalg_cond::call(const at::Tensor & self, const c10::optional<at::Scalar> & p) {

    static auto op = create_linalg_cond_typed_handle();
    return op.call(self, p);
}

// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
at::Tensor linalg_cond::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p) {

    static auto op = create_linalg_cond_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_out, name, "aten::linalg_cond")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_out, schema_str, "linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cond_out::schema> create_linalg_cond_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cond_out::name, linalg_cond_out::overload_name)
      .typed<linalg_cond_out::schema>();
}

// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cond_out::call(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {

    static auto op = create_linalg_cond_out_typed_handle();
    return op.call(self, p, out);
}

// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cond_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {

    static auto op = create_linalg_cond_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_p_str, name, "aten::linalg_cond")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_p_str, overload_name, "p_str")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_p_str, schema_str, "linalg_cond.p_str(Tensor self, str p) -> Tensor")

// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cond_p_str::schema> create_linalg_cond_p_str_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cond_p_str::name, linalg_cond_p_str::overload_name)
      .typed<linalg_cond_p_str::schema>();
}

// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
at::Tensor linalg_cond_p_str::call(const at::Tensor & self, c10::string_view p) {

    static auto op = create_linalg_cond_p_str_typed_handle();
    return op.call(self, p);
}

// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
at::Tensor linalg_cond_p_str::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p) {

    static auto op = create_linalg_cond_p_str_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_p_str_out, name, "aten::linalg_cond")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_p_str_out, overload_name, "p_str_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_cond_p_str_out, schema_str, "linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cond_p_str_out::schema> create_linalg_cond_p_str_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cond_p_str_out::name, linalg_cond_p_str_out::overload_name)
      .typed<linalg_cond_p_str_out::schema>();
}

// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cond_p_str_out::call(const at::Tensor & self, c10::string_view p, at::Tensor & out) {

    static auto op = create_linalg_cond_p_str_out_typed_handle();
    return op.call(self, p, out);
}

// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cond_p_str_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out) {

    static auto op = create_linalg_cond_p_str_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}
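
// [Editor's note] Illustrative sketch (not generated): the four linalg_cond
// wrappers correspond to overload names "", "out", "p_str" and "p_str_out";
// the Scalar and string forms are distinct schemas resolved by
// findSchemaOrThrow above.
[[maybe_unused]] static void example_linalg_cond(const at::Tensor & a) {
  at::Tensor c_default = linalg_cond::call(a, c10::nullopt); // default 2-norm condition number
  at::Tensor c_fro = linalg_cond_p_str::call(a, "fro");      // Frobenius-norm overload
  (void)c_default; (void)c_fro;
}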

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_tensor, name, "aten::linalg_pinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_tensor, overload_name, "atol_rtol_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_tensor, schema_str, "linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor")

// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_atol_rtol_tensor::schema> create_linalg_pinv_atol_rtol_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_atol_rtol_tensor::name, linalg_pinv_atol_rtol_tensor::overload_name)
      .typed<linalg_pinv_atol_rtol_tensor::schema>();
}

// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_pinv_atol_rtol_tensor::call(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {

    static auto op = create_linalg_pinv_atol_rtol_tensor_typed_handle();
    return op.call(self, atol, rtol, hermitian);
}

// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_pinv_atol_rtol_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {

    static auto op = create_linalg_pinv_atol_rtol_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_tensor_out, name, "aten::linalg_pinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_tensor_out, overload_name, "atol_rtol_tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_tensor_out, schema_str, "linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_atol_rtol_tensor_out::schema> create_linalg_pinv_atol_rtol_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_atol_rtol_tensor_out::name, linalg_pinv_atol_rtol_tensor_out::overload_name)
      .typed<linalg_pinv_atol_rtol_tensor_out::schema>();
}

// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_atol_rtol_tensor_out::call(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_pinv_atol_rtol_tensor_out_typed_handle();
    return op.call(self, atol, rtol, hermitian, out);
}

// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_atol_rtol_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_pinv_atol_rtol_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_float, name, "aten::linalg_pinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_float, overload_name, "atol_rtol_float")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_float, schema_str, "linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor")

// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_atol_rtol_float::schema> create_linalg_pinv_atol_rtol_float_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_atol_rtol_float::name, linalg_pinv_atol_rtol_float::overload_name)
      .typed<linalg_pinv_atol_rtol_float::schema>();
}

// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_pinv_atol_rtol_float::call(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {

    static auto op = create_linalg_pinv_atol_rtol_float_typed_handle();
    return op.call(self, atol, rtol, hermitian);
}

// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_pinv_atol_rtol_float::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {

    static auto op = create_linalg_pinv_atol_rtol_float_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_float_out, name, "aten::linalg_pinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_float_out, overload_name, "atol_rtol_float_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_atol_rtol_float_out, schema_str, "linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_atol_rtol_float_out::schema> create_linalg_pinv_atol_rtol_float_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_atol_rtol_float_out::name, linalg_pinv_atol_rtol_float_out::overload_name)
      .typed<linalg_pinv_atol_rtol_float_out::schema>();
}

// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_atol_rtol_float_out::call(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_pinv_atol_rtol_float_out_typed_handle();
    return op.call(self, atol, rtol, hermitian, out);
}

// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_atol_rtol_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_pinv_atol_rtol_float_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv, name, "aten::linalg_pinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv, schema_str, "linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor")

// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv::schema> create_linalg_pinv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv::name, linalg_pinv::overload_name)
      .typed<linalg_pinv::schema>();
}

// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
at::Tensor linalg_pinv::call(const at::Tensor & self, double rcond, bool hermitian) {

    static auto op = create_linalg_pinv_typed_handle();
    return op.call(self, rcond, hermitian);
}

// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
at::Tensor linalg_pinv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian) {

    static auto op = create_linalg_pinv_typed_handle();
    return op.redispatch(dispatchKeySet, self, rcond, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_rcond_tensor, name, "aten::linalg_pinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_rcond_tensor, overload_name, "rcond_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_rcond_tensor, schema_str, "linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor")

// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_rcond_tensor::schema> create_linalg_pinv_rcond_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_rcond_tensor::name, linalg_pinv_rcond_tensor::overload_name)
      .typed<linalg_pinv_rcond_tensor::schema>();
}

// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
at::Tensor linalg_pinv_rcond_tensor::call(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {

    static auto op = create_linalg_pinv_rcond_tensor_typed_handle();
    return op.call(self, rcond, hermitian);
}

// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
at::Tensor linalg_pinv_rcond_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {

    static auto op = create_linalg_pinv_rcond_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, rcond, hermitian);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_out, name, "aten::linalg_pinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_out, schema_str, "linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_out::schema> create_linalg_pinv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_out::name, linalg_pinv_out::overload_name)
      .typed<linalg_pinv_out::schema>();
}

// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_out::call(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_pinv_out_typed_handle();
    return op.call(self, rcond, hermitian, out);
}

// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_pinv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, rcond, hermitian, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_out_rcond_tensor, name, "aten::linalg_pinv")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_out_rcond_tensor, overload_name, "out_rcond_tensor")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_pinv_out_rcond_tensor, schema_str, "linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_out_rcond_tensor::schema> create_linalg_pinv_out_rcond_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_out_rcond_tensor::name, linalg_pinv_out_rcond_tensor::overload_name)
      .typed<linalg_pinv_out_rcond_tensor::schema>();
}

// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_out_rcond_tensor::call(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_pinv_out_rcond_tensor_typed_handle();
    return op.call(self, rcond, hermitian, out);
}

// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_out_rcond_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {

    static auto op = create_linalg_pinv_out_rcond_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, rcond, hermitian, out);
}
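
// [Editor's note] Illustrative sketch (not generated): of the six
// linalg_pinv wrappers above, the atol/rtol pair is the current interface,
// while the rcond forms are the legacy spelling. Leaving both tolerances
// unset falls back to the documented dtype-dependent default rtol.
[[maybe_unused]] static at::Tensor example_linalg_pinv(const at::Tensor & a) {
  return linalg_pinv_atol_rtol_float::call(a, c10::nullopt, c10::nullopt, /*hermitian=*/false);
}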

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_ex, name, "aten::linalg_solve_ex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_ex, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_ex, schema_str, "linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)")

// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_ex::schema> create_linalg_solve_ex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_solve_ex::name, linalg_solve_ex::overload_name)
      .typed<linalg_solve_ex::schema>();
}

// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex::call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {

    static auto op = create_linalg_solve_ex_typed_handle();
    return op.call(A, B, left, check_errors);
}

// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {

    static auto op = create_linalg_solve_ex_typed_handle();
    return op.redispatch(dispatchKeySet, A, B, left, check_errors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_ex_out, name, "aten::linalg_solve_ex")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_ex_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_solve_ex_out, schema_str, "linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)")

// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_ex_out::schema> create_linalg_solve_ex_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_solve_ex_out::name, linalg_solve_ex_out::overload_name)
      .typed<linalg_solve_ex_out::schema>();
}

// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out::call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {

    static auto op = create_linalg_solve_ex_out_typed_handle();
    return op.call(A, B, left, check_errors, result, info);
}

// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {

    static auto op = create_linalg_solve_ex_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, B, left, check_errors, result, info);
}
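
// [Editor's note] Illustrative sketch (not generated): solve A X = B with
// left=true; check_errors=false routes failure into `info` instead of
// raising.
[[maybe_unused]] static void example_linalg_solve_ex(const at::Tensor & A, const at::Tensor & B) {
  auto [X, info] = linalg_solve_ex::call(A, B, /*left=*/true, /*check_errors=*/false);
  (void)X; (void)info;
}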

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorsolve, name, "aten::linalg_tensorsolve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorsolve, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorsolve, schema_str, "linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor")

// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_tensorsolve::schema> create_linalg_tensorsolve_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_tensorsolve::name, linalg_tensorsolve::overload_name)
      .typed<linalg_tensorsolve::schema>();
}

// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
at::Tensor linalg_tensorsolve::call(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {

    static auto op = create_linalg_tensorsolve_typed_handle();
    return op.call(self, other, dims);
}

// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
at::Tensor linalg_tensorsolve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {

    static auto op = create_linalg_tensorsolve_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dims);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorsolve_out, name, "aten::linalg_tensorsolve")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorsolve_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_tensorsolve_out, schema_str, "linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_tensorsolve_out::schema> create_linalg_tensorsolve_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_tensorsolve_out::name, linalg_tensorsolve_out::overload_name)
      .typed<linalg_tensorsolve_out::schema>();
}

// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_tensorsolve_out::call(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {

    static auto op = create_linalg_tensorsolve_out_typed_handle();
    return op.call(self, other, dims, out);
}

// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_tensorsolve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {

    static auto op = create_linalg_tensorsolve_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dims, out);
}
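
// [Editor's note] Illustrative sketch (not generated): with dims=None no
// dimensions of `a` are permuted; its leading dimensions must already match
// b's shape, and the trailing ones define the solution's shape.
[[maybe_unused]] static at::Tensor example_linalg_tensorsolve(const at::Tensor & a, const at::Tensor & b) {
  return linalg_tensorsolve::call(a, b, c10::nullopt);
}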

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_multi_dot, name, "aten::linalg_multi_dot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_multi_dot, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_multi_dot, schema_str, "linalg_multi_dot(Tensor[] tensors) -> Tensor")

// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_multi_dot::schema> create_linalg_multi_dot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_multi_dot::name, linalg_multi_dot::overload_name)
      .typed<linalg_multi_dot::schema>();
}

// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
at::Tensor linalg_multi_dot::call(at::TensorList tensors) {

    static auto op = create_linalg_multi_dot_typed_handle();
    return op.call(tensors);
}

// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
at::Tensor linalg_multi_dot::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {

    static auto op = create_linalg_multi_dot_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_multi_dot_out, name, "aten::linalg_multi_dot")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_multi_dot_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(linalg_multi_dot_out, schema_str, "linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)")

// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_multi_dot_out::schema> create_linalg_multi_dot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_multi_dot_out::name, linalg_multi_dot_out::overload_name)
      .typed<linalg_multi_dot_out::schema>();
}

// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_multi_dot_out::call(at::TensorList tensors, at::Tensor & out) {

    static auto op = create_linalg_multi_dot_out_typed_handle();
    return op.call(tensors, out);
}

// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_multi_dot_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {

    static auto op = create_linalg_multi_dot_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}
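
// [Editor's note] Illustrative sketch (not generated): at::TensorList is a
// non-owning ArrayRef<Tensor>, so a local array is sufficient at call sites.
[[maybe_unused]] static at::Tensor example_linalg_multi_dot(const at::Tensor & a, const at::Tensor & b, const at::Tensor & c) {
  const at::Tensor ts[] = {a, b, c};
  return linalg_multi_dot::call(ts);
}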

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_string_default, name, "aten::_test_string_default")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_string_default, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_test_string_default, schema_str, "_test_string_default(Tensor dummy, str a=\"\\\"'\\\\\", str b='\"\\'\\\\') -> Tensor")

// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_string_default::schema> create__test_string_default_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_string_default::name, _test_string_default::overload_name)
      .typed<_test_string_default::schema>();
}

// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
at::Tensor _test_string_default::call(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {

    static auto op = create__test_string_default_typed_handle();
    return op.call(dummy, a, b);
}

// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
at::Tensor _test_string_default::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, c10::string_view a, c10::string_view b) {

    static auto op = create__test_string_default_typed_handle();
    return op.redispatch(dispatchKeySet, dummy, a, b);
}
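
// [Editor's note] The double escaping in the schema_str above is schema
// escaping layered on C++ escaping: both string defaults decode to the same
// three-character value (double quote, single quote, backslash).
// Illustrative call (not generated) passing that default explicitly:
[[maybe_unused]] static at::Tensor example__test_string_default(const at::Tensor & dummy) {
  return _test_string_default::call(dummy, "\"'\\", "\"'\\");
}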

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_dense_tensors, name, "aten::flatten_dense_tensors")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_dense_tensors, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(flatten_dense_tensors, schema_str, "flatten_dense_tensors(Tensor[] tensors) -> Tensor")

// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<flatten_dense_tensors::schema> create_flatten_dense_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_dense_tensors::name, flatten_dense_tensors::overload_name)
      .typed<flatten_dense_tensors::schema>();
}

// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
at::Tensor flatten_dense_tensors::call(at::TensorList tensors) {

    static auto op = create_flatten_dense_tensors_typed_handle();
    return op.call(tensors);
}

// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
at::Tensor flatten_dense_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {

    static auto op = create_flatten_dense_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_copy, name, "aten::_conj_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_copy, schema_str, "_conj_copy(Tensor self) -> Tensor")

// aten::_conj_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_conj_copy::schema> create__conj_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj_copy::name, _conj_copy::overload_name)
      .typed<_conj_copy::schema>();
}

// aten::_conj_copy(Tensor self) -> Tensor
at::Tensor _conj_copy::call(const at::Tensor & self) {

    static auto op = create__conj_copy_typed_handle();
    return op.call(self);
}

// aten::_conj_copy(Tensor self) -> Tensor
at::Tensor _conj_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__conj_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_copy, name, "aten::detach_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_copy, schema_str, "detach_copy(Tensor self) -> Tensor")

// aten::detach_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<detach_copy::schema> create_detach_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(detach_copy::name, detach_copy::overload_name)
      .typed<detach_copy::schema>();
}

// aten::detach_copy(Tensor self) -> Tensor
at::Tensor detach_copy::call(const at::Tensor & self) {

    static auto op = create_detach_copy_typed_handle();
    return op.call(self);
}

// aten::detach_copy(Tensor self) -> Tensor
at::Tensor detach_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_detach_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices_copy, name, "aten::row_indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices_copy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices_copy, schema_str, "row_indices_copy(Tensor self) -> Tensor")

// aten::row_indices_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<row_indices_copy::schema> create_row_indices_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_indices_copy::name, row_indices_copy::overload_name)
      .typed<row_indices_copy::schema>();
}

// aten::row_indices_copy(Tensor self) -> Tensor
at::Tensor row_indices_copy::call(const at::Tensor & self) {

    static auto op = create_row_indices_copy_typed_handle();
    return op.call(self);
}

// aten::row_indices_copy(Tensor self) -> Tensor
at::Tensor row_indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_row_indices_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}
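
// [Editor's note] Illustrative sketch (not generated): the *_copy ops
// (_conj_copy, detach_copy, row_indices_copy, ...) are the functionalized
// counterparts of view/metadata ops: same values, but materialized into
// fresh tensors that never alias the input.
[[maybe_unused]] static at::Tensor example_detach_copy(const at::Tensor & self) {
  return detach_copy::call(self);
}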
12448
12449STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_encoder_layer_fwd, name, "aten::_transformer_encoder_layer_fwd")
12450STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_encoder_layer_fwd, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_encoder_layer_fwd, schema_str, "_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor")

// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_transformer_encoder_layer_fwd::schema> create__transformer_encoder_layer_fwd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_transformer_encoder_layer_fwd::name, _transformer_encoder_layer_fwd::overload_name)
      .typed<_transformer_encoder_layer_fwd::schema>();
}

// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
at::Tensor _transformer_encoder_layer_fwd::call(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {

    static auto op = create__transformer_encoder_layer_fwd_typed_handle();
    return op.call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
}

// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
at::Tensor _transformer_encoder_layer_fwd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {

    static auto op = create__transformer_encoder_layer_fwd_typed_handle();
    return op.redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
}
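
// [Editorial example, not generated] A minimal usage sketch: the public
// at::_transformer_encoder_layer_fwd wrapper is assumed to forward to the
// ::call above. Shapes and weight tensors are illustrative placeholders only.
//
//   at::Tensor src = at::rand({8, 4, 256});  // (seq, batch, embed_dim)
//   at::Tensor y = at::_transformer_encoder_layer_fwd(
//       src, /*embed_dim=*/256, /*num_heads=*/8,
//       qkv_weight, qkv_bias, proj_weight, proj_bias,
//       /*use_gelu=*/true, /*norm_first=*/false, /*eps=*/1e-5,
//       norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2,
//       ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2);
//   // mask and mask_type default to c10::nullopt per the schema above.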

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_native_multi_head_attention, name, "aten::_native_multi_head_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_native_multi_head_attention, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_native_multi_head_attention, schema_str, "_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)")

// aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_native_multi_head_attention::schema> create__native_multi_head_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_native_multi_head_attention::name, _native_multi_head_attention::overload_name)
      .typed<_native_multi_head_attention::schema>();
}

// aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {

    static auto op = create__native_multi_head_attention_typed_handle();
    return op.call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
}

// aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {

    static auto op = create__native_multi_head_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
}
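
// [Editorial example, not generated] Hedged sketch of the tuple return: per
// the schema defaults above, the second element carries attention weights when
// need_weights=True, averaged over heads when average_attn_weights=True.
//
//   auto result = at::_native_multi_head_attention(
//       query, key, value, /*embed_dim=*/256, /*num_head=*/8,
//       qkv_weight, qkv_bias, proj_weight, proj_bias);
//   at::Tensor attn_out = std::get<0>(result);
//   at::Tensor attn_weights = std::get<1>(result);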

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_attention, name, "aten::_scaled_dot_product_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_attention, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_attention, schema_str, "_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)")

// aten::_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_attention::schema> create__scaled_dot_product_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_attention::name, _scaled_dot_product_attention::overload_name)
      .typed<_scaled_dot_product_attention::schema>();
}

// aten::_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {

    static auto op = create__scaled_dot_product_attention_typed_handle();
    return op.call(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
}

// aten::_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {

    static auto op = create__scaled_dot_product_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
}
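
// [Editorial example, not generated] Sketch only: the optional attn_mask is
// passed as c10::nullopt and the remaining arguments mirror the schema
// defaults, with causal masking enabled.
//
//   auto [out, weights] = at::_scaled_dot_product_attention(
//       query, key, value, /*attn_mask=*/c10::nullopt,
//       /*dropout_p=*/0.0, /*need_attn_weights=*/false, /*is_causal=*/true);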

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_sdp_choice, name, "aten::_fused_sdp_choice")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_sdp_choice, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_sdp_choice, schema_str, "_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int")

// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sdp_choice::schema> create__fused_sdp_choice_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sdp_choice::name, _fused_sdp_choice::overload_name)
      .typed<_fused_sdp_choice::schema>();
}

// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int
int64_t _fused_sdp_choice::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {

    static auto op = create__fused_sdp_choice_typed_handle();
    return op.call(query, key, value, attn_mask, dropout_p, is_causal);
}

// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int
int64_t _fused_sdp_choice::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {

    static auto op = create__fused_sdp_choice_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal);
}
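
// [Editorial example, not generated] The returned int encodes which fused SDP
// backend would be selected for these inputs; the meaning of the values lives
// outside this file (assumption: they correspond to an SDP backend enum such
// as sdp::SDPBackend). Sketch only, relying on the schema defaults:
//
//   int64_t backend = at::_fused_sdp_choice(query, key, value);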

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_flash_attention, name, "aten::_scaled_dot_product_flash_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_flash_attention, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_flash_attention, schema_str, "_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)")

// aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_flash_attention::schema> create__scaled_dot_product_flash_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_flash_attention::name, _scaled_dot_product_flash_attention::overload_name)
      .typed<_scaled_dot_product_flash_attention::schema>();
}

// aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t,int64_t,int64_t,int64_t,at::Tensor> _scaled_dot_product_flash_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask) {

    static auto op = create__scaled_dot_product_flash_attention_typed_handle();
    return op.call(query, key, value, dropout_p, is_causal, return_debug_mask);
}

// aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t,int64_t,int64_t,int64_t,at::Tensor> _scaled_dot_product_flash_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask) {

    static auto op = create__scaled_dot_product_flash_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, dropout_p, is_causal, return_debug_mask);
}
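
// [Editorial example, not generated] Unpacking the nine-element result; the
// element names follow the schema string above (logsumexp, cumulative sequence
// lengths, max sequence lengths, philox RNG state, optional debug mask).
//
//   auto fwd = at::_scaled_dot_product_flash_attention(
//       query, key, value, /*dropout_p=*/0.0,
//       /*is_causal=*/false, /*return_debug_mask=*/false);
//   at::Tensor output = std::get<0>(fwd);
//   at::Tensor logsumexp = std::get<1>(fwd);
//   int64_t philox_seed = std::get<6>(fwd);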

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_efficient_attention_backward, name, "aten::_scaled_dot_product_efficient_attention_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_efficient_attention_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_scaled_dot_product_efficient_attention_backward, schema_str, "_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)")

// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_efficient_attention_backward::schema> create__scaled_dot_product_efficient_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_efficient_attention_backward::name, _scaled_dot_product_efficient_attention_backward::overload_name)
      .typed<_scaled_dot_product_efficient_attention_backward::schema>();
}

// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward::call(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {

    static auto op = create__scaled_dot_product_efficient_attention_backward_typed_handle();
    return op.call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
}

// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {

    static auto op = create__scaled_dot_product_efficient_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_flash_attention_backward, name, "aten::_flash_attention_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_flash_attention_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_flash_attention_backward, schema_str, "_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)")

// aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_flash_attention_backward::schema> create__flash_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_flash_attention_backward::name, _flash_attention_backward::overload_name)
      .typed<_flash_attention_backward::schema>();
}

// aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {

    static auto op = create__flash_attention_backward_typed_handle();
    return op.call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
}

// aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {

    static auto op = create__flash_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
}
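
// [Editorial example, not generated] Hedged sketch: the three returned tensors
// are presumably the gradients with respect to query, key, and value. The
// cum_seq_q/cum_seq_k, max_q/max_k, and philox values are expected to come
// from the matching _scaled_dot_product_flash_attention forward call above.
//
//   auto grads = at::_flash_attention_backward(
//       grad_out, query, key, value, out, logsumexp,
//       cum_seq_q, cum_seq_k, max_q, max_k,
//       /*dropout_p=*/0.0, /*is_causal=*/false, philox_seed, philox_offset);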

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficient_attention_backward, name, "aten::_efficient_attention_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficient_attention_backward, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_efficient_attention_backward, schema_str, "_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)")

// aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_efficient_attention_backward::schema> create__efficient_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_efficient_attention_backward::name, _efficient_attention_backward::overload_name)
      .typed<_efficient_attention_backward::schema>();
}

// aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward::call(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {

    static auto op = create__efficient_attention_backward_typed_handle();
    return op.call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
}

// aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {

    static auto op = create__efficient_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_multi_head_attention, name, "aten::_triton_multi_head_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_multi_head_attention, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_multi_head_attention, schema_str, "_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor")

// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_triton_multi_head_attention::schema> create__triton_multi_head_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_triton_multi_head_attention::name, _triton_multi_head_attention::overload_name)
      .typed<_triton_multi_head_attention::schema>();
}

// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
at::Tensor _triton_multi_head_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask) {

    static auto op = create__triton_multi_head_attention_typed_handle();
    return op.call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
}

// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
at::Tensor _triton_multi_head_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask) {

    static auto op = create__triton_multi_head_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
}
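
// [Editorial example, not generated] Same calling convention as
// _native_multi_head_attention above but with a single Tensor result; sketch
// assuming a build where the Triton-backed kernel is registered.
//
//   at::Tensor y = at::_triton_multi_head_attention(
//       query, key, value, /*embed_dim=*/256, /*num_head=*/8,
//       qkv_weight, qkv_bias, proj_weight, proj_bias);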

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_airy_ai, name, "aten::special_airy_ai")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_airy_ai, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_airy_ai, schema_str, "special_airy_ai(Tensor x) -> Tensor")

// aten::special_airy_ai(Tensor x) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_airy_ai::schema> create_special_airy_ai_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_airy_ai::name, special_airy_ai::overload_name)
      .typed<special_airy_ai::schema>();
}

// aten::special_airy_ai(Tensor x) -> Tensor
at::Tensor special_airy_ai::call(const at::Tensor & x) {

    static auto op = create_special_airy_ai_typed_handle();
    return op.call(x);
}

// aten::special_airy_ai(Tensor x) -> Tensor
at::Tensor special_airy_ai::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {

    static auto op = create_special_airy_ai_typed_handle();
    return op.redispatch(dispatchKeySet, x);
}
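
// [Editorial example, not generated] Elementwise Airy function Ai(x); a
// minimal sketch through the public at:: wrapper:
//
//   at::Tensor x = at::linspace(-2.0, 2.0, 16);
//   at::Tensor ai = at::special_airy_ai(x);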

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_airy_ai_out, name, "aten::special_airy_ai")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_airy_ai_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_airy_ai_out, schema_str, "special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_airy_ai_out::schema> create_special_airy_ai_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_airy_ai_out::name, special_airy_ai_out::overload_name)
      .typed<special_airy_ai_out::schema>();
}

// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_airy_ai_out::call(const at::Tensor & x, at::Tensor & out) {

    static auto op = create_special_airy_ai_out_typed_handle();
    return op.call(x, out);
}

// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_airy_ai_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {

    static auto op = create_special_airy_ai_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, out);
}
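
// [Editorial example, not generated] The .out overload writes into a
// caller-provided tensor and returns a reference to it, as the (a!) alias
// annotation in the schema indicates. Sketch assuming the generated
// at::special_airy_ai_out wrapper:
//
//   at::Tensor x = at::rand({16});
//   at::Tensor out = at::empty_like(x);
//   at::special_airy_ai_out(out, x);  // out now holds Ai(x)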

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w, name, "aten::special_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w, schema_str, "special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor")

// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w::schema> create_special_chebyshev_polynomial_w_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w::name, special_chebyshev_polynomial_w::overload_name)
      .typed<special_chebyshev_polynomial_w::schema>();
}

// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_w::call(const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_w_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_w::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_w_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}
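
// [Editorial example, not generated] Chebyshev polynomial of the fourth kind
// W_n(x), evaluated elementwise with broadcasting over x and n. Sketch only:
//
//   at::Tensor x = at::rand({4});
//   at::Tensor n = at::full({4}, 3.0);
//   at::Tensor w = at::special_chebyshev_polynomial_w(x, n);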

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_x_scalar, name, "aten::special_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_x_scalar, overload_name, "x_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_x_scalar, schema_str, "special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor")

// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_x_scalar::schema> create_special_chebyshev_polynomial_w_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_x_scalar::name, special_chebyshev_polynomial_w_x_scalar::overload_name)
      .typed<special_chebyshev_polynomial_w_x_scalar::schema>();
}

// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_w_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_w_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_w_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_chebyshev_polynomial_w_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_n_scalar, name, "aten::special_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_n_scalar, overload_name, "n_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_n_scalar, schema_str, "special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor")

// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_n_scalar::schema> create_special_chebyshev_polynomial_w_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_n_scalar::name, special_chebyshev_polynomial_w_n_scalar::overload_name)
      .typed<special_chebyshev_polynomial_w_n_scalar::schema>();
}

// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_w_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_chebyshev_polynomial_w_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_w_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_chebyshev_polynomial_w_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}
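
// [Editorial example, not generated] The x_scalar / n_scalar overloads above
// resolve by argument type: an at::Scalar in either position selects the
// corresponding overload_name, so each call below dispatches to a distinct
// schema. Sketch only:
//
//   at::Tensor a = at::special_chebyshev_polynomial_w(0.5, n);  // x_scalar
//   at::Tensor b = at::special_chebyshev_polynomial_w(x, 3);    // n_scalar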

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_out, name, "aten::special_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_out, schema_str, "special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_out::schema> create_special_chebyshev_polynomial_w_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_out::name, special_chebyshev_polynomial_w_out::overload_name)
      .typed<special_chebyshev_polynomial_w_out::schema>();
}

// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_w_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_w_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_x_scalar_out, name, "aten::special_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_x_scalar_out, overload_name, "x_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_x_scalar_out, schema_str, "special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_x_scalar_out::schema> create_special_chebyshev_polynomial_w_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_x_scalar_out::name, special_chebyshev_polynomial_w_x_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_w_x_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_w_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_w_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_n_scalar_out, name, "aten::special_chebyshev_polynomial_w")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_n_scalar_out, overload_name, "n_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_chebyshev_polynomial_w_n_scalar_out, schema_str, "special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_n_scalar_out::schema> create_special_chebyshev_polynomial_w_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_n_scalar_out::name, special_chebyshev_polynomial_w_n_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_w_n_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_w_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_chebyshev_polynomial_w_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}
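
// [Editorial example, not generated] The redispatch entry points above are
// intended for code already inside the dispatcher (e.g. fallbacks or wrapper
// kernels) that must re-enter dispatch below the current key. A hedged sketch,
// mirroring the masking pattern used by autograd-generated wrappers:
//
//   at::Tensor & redispatch_example(c10::DispatchKeySet ks,
//                                   const at::Tensor & x, const at::Tensor & n,
//                                   at::Tensor & out) {
//     // Drop autograd-and-above keys before re-dispatching to the backend.
//     return at::_ops::special_chebyshev_polynomial_w_out::redispatch(
//         ks & c10::after_autograd_keyset, x, n, out);
//   }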
12873
12874STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h, name, "aten::special_hermite_polynomial_h")
12875STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h, overload_name, "")
12876STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h, schema_str, "special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor")
12877
12878// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
12879static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h::schema> create_special_hermite_polynomial_h_typed_handle() {
12880 return c10::Dispatcher::singleton()
12881 .findSchemaOrThrow(special_hermite_polynomial_h::name, special_hermite_polynomial_h::overload_name)
12882 .typed<special_hermite_polynomial_h::schema>();
12883}
12884
12885// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
12886at::Tensor special_hermite_polynomial_h::call(const at::Tensor & x, const at::Tensor & n) {
12887
12888 static auto op = create_special_hermite_polynomial_h_typed_handle();
12889 return op.call(x, n);
12890}
12891
12892// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
12893at::Tensor special_hermite_polynomial_h::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
12894
12895 static auto op = create_special_hermite_polynomial_h_typed_handle();
12896 return op.redispatch(dispatchKeySet, x, n);
12897}
12898
12899STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_x_scalar, name, "aten::special_hermite_polynomial_h")
12900STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_x_scalar, overload_name, "x_scalar")
12901STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_x_scalar, schema_str, "special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor")
12902
12903// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
12904static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_x_scalar::schema> create_special_hermite_polynomial_h_x_scalar_typed_handle() {
12905 return c10::Dispatcher::singleton()
12906 .findSchemaOrThrow(special_hermite_polynomial_h_x_scalar::name, special_hermite_polynomial_h_x_scalar::overload_name)
12907 .typed<special_hermite_polynomial_h_x_scalar::schema>();
12908}
12909
12910// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
12911at::Tensor special_hermite_polynomial_h_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
12912
12913 static auto op = create_special_hermite_polynomial_h_x_scalar_typed_handle();
12914 return op.call(x, n);
12915}
12916
12917// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
12918at::Tensor special_hermite_polynomial_h_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
12919
12920 static auto op = create_special_hermite_polynomial_h_x_scalar_typed_handle();
12921 return op.redispatch(dispatchKeySet, x, n);
12922}
12923
12924STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_n_scalar, name, "aten::special_hermite_polynomial_h")
12925STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_n_scalar, overload_name, "n_scalar")
12926STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_n_scalar, schema_str, "special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor")
12927
12928// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
12929static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_n_scalar::schema> create_special_hermite_polynomial_h_n_scalar_typed_handle() {
12930 return c10::Dispatcher::singleton()
12931 .findSchemaOrThrow(special_hermite_polynomial_h_n_scalar::name, special_hermite_polynomial_h_n_scalar::overload_name)
12932 .typed<special_hermite_polynomial_h_n_scalar::schema>();
12933}
12934
12935// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
12936at::Tensor special_hermite_polynomial_h_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
12937
12938 static auto op = create_special_hermite_polynomial_h_n_scalar_typed_handle();
12939 return op.call(x, n);
12940}
12941
12942// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
12943at::Tensor special_hermite_polynomial_h_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
12944
12945 static auto op = create_special_hermite_polynomial_h_n_scalar_typed_handle();
12946 return op.redispatch(dispatchKeySet, x, n);
12947}
12948
12949STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_out, name, "aten::special_hermite_polynomial_h")
12950STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_out, overload_name, "out")
12951STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_out, schema_str, "special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
12952
12953// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
12954static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_out::schema> create_special_hermite_polynomial_h_out_typed_handle() {
12955 return c10::Dispatcher::singleton()
12956 .findSchemaOrThrow(special_hermite_polynomial_h_out::name, special_hermite_polynomial_h_out::overload_name)
12957 .typed<special_hermite_polynomial_h_out::schema>();
12958}
12959
12960// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
12961at::Tensor & special_hermite_polynomial_h_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
12962
12963 static auto op = create_special_hermite_polynomial_h_out_typed_handle();
12964 return op.call(x, n, out);
12965}
12966
12967// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
12968at::Tensor & special_hermite_polynomial_h_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
12969
12970 static auto op = create_special_hermite_polynomial_h_out_typed_handle();
12971 return op.redispatch(dispatchKeySet, x, n, out);
12972}
12973
12974STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_x_scalar_out, name, "aten::special_hermite_polynomial_h")
12975STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_x_scalar_out, overload_name, "x_scalar_out")
12976STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_x_scalar_out, schema_str, "special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
12977
12978// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
12979static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_x_scalar_out::schema> create_special_hermite_polynomial_h_x_scalar_out_typed_handle() {
12980 return c10::Dispatcher::singleton()
12981 .findSchemaOrThrow(special_hermite_polynomial_h_x_scalar_out::name, special_hermite_polynomial_h_x_scalar_out::overload_name)
12982 .typed<special_hermite_polynomial_h_x_scalar_out::schema>();
12983}
12984
12985// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
12986at::Tensor & special_hermite_polynomial_h_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
12987
12988 static auto op = create_special_hermite_polynomial_h_x_scalar_out_typed_handle();
12989 return op.call(x, n, out);
12990}
12991
12992// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
12993at::Tensor & special_hermite_polynomial_h_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
12994
12995 static auto op = create_special_hermite_polynomial_h_x_scalar_out_typed_handle();
12996 return op.redispatch(dispatchKeySet, x, n, out);
12997}
12998
12999STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_n_scalar_out, name, "aten::special_hermite_polynomial_h")
13000STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_n_scalar_out, overload_name, "n_scalar_out")
13001STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_hermite_polynomial_h_n_scalar_out, schema_str, "special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")
13002
13003// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
13004static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_n_scalar_out::schema> create_special_hermite_polynomial_h_n_scalar_out_typed_handle() {
13005 return c10::Dispatcher::singleton()
13006 .findSchemaOrThrow(special_hermite_polynomial_h_n_scalar_out::name, special_hermite_polynomial_h_n_scalar_out::overload_name)
13007 .typed<special_hermite_polynomial_h_n_scalar_out::schema>();
13008}
13009
13010// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
13011at::Tensor & special_hermite_polynomial_h_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
13012
13013 static auto op = create_special_hermite_polynomial_h_n_scalar_out_typed_handle();
13014 return op.call(x, n, out);
13015}
13016
13017// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
13018at::Tensor & special_hermite_polynomial_h_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
13019
13020 static auto op = create_special_hermite_polynomial_h_n_scalar_out_typed_handle();
13021 return op.redispatch(dispatchKeySet, x, n, out);
13022}
13023
13024STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i0, name, "aten::special_modified_bessel_i0")
13025STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i0, overload_name, "")
13026STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i0, schema_str, "special_modified_bessel_i0(Tensor self) -> Tensor")
13027
13028// aten::special_modified_bessel_i0(Tensor self) -> Tensor
13029static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_i0::schema> create_special_modified_bessel_i0_typed_handle() {
13030 return c10::Dispatcher::singleton()
13031 .findSchemaOrThrow(special_modified_bessel_i0::name, special_modified_bessel_i0::overload_name)
13032 .typed<special_modified_bessel_i0::schema>();
13033}
13034
13035// aten::special_modified_bessel_i0(Tensor self) -> Tensor
13036at::Tensor special_modified_bessel_i0::call(const at::Tensor & self) {
13037
13038 static auto op = create_special_modified_bessel_i0_typed_handle();
13039 return op.call(self);
13040}
13041
13042// aten::special_modified_bessel_i0(Tensor self) -> Tensor
13043at::Tensor special_modified_bessel_i0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
13044
13045 static auto op = create_special_modified_bessel_i0_typed_handle();
13046 return op.redispatch(dispatchKeySet, self);
13047}
13048
13049STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i0_out, name, "aten::special_modified_bessel_i0")
13050STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i0_out, overload_name, "out")
13051STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_i0_out, schema_str, "special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
13052
13053// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13054static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_i0_out::schema> create_special_modified_bessel_i0_out_typed_handle() {
13055 return c10::Dispatcher::singleton()
13056 .findSchemaOrThrow(special_modified_bessel_i0_out::name, special_modified_bessel_i0_out::overload_name)
13057 .typed<special_modified_bessel_i0_out::schema>();
13058}
13059
13060// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13061at::Tensor & special_modified_bessel_i0_out::call(const at::Tensor & self, at::Tensor & out) {
13062
13063 static auto op = create_special_modified_bessel_i0_out_typed_handle();
13064 return op.call(self, out);
13065}
13066
13067// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13068at::Tensor & special_modified_bessel_i0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
13069
13070 static auto op = create_special_modified_bessel_i0_out_typed_handle();
13071 return op.redispatch(dispatchKeySet, self, out);
13072}
13073
13074STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k0, name, "aten::special_modified_bessel_k0")
13075STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k0, overload_name, "")
13076STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k0, schema_str, "special_modified_bessel_k0(Tensor self) -> Tensor")
13077
13078// aten::special_modified_bessel_k0(Tensor self) -> Tensor
13079static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_k0::schema> create_special_modified_bessel_k0_typed_handle() {
13080 return c10::Dispatcher::singleton()
13081 .findSchemaOrThrow(special_modified_bessel_k0::name, special_modified_bessel_k0::overload_name)
13082 .typed<special_modified_bessel_k0::schema>();
13083}
13084
13085// aten::special_modified_bessel_k0(Tensor self) -> Tensor
13086at::Tensor special_modified_bessel_k0::call(const at::Tensor & self) {
13087
13088 static auto op = create_special_modified_bessel_k0_typed_handle();
13089 return op.call(self);
13090}
13091
13092// aten::special_modified_bessel_k0(Tensor self) -> Tensor
13093at::Tensor special_modified_bessel_k0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
13094
13095 static auto op = create_special_modified_bessel_k0_typed_handle();
13096 return op.redispatch(dispatchKeySet, self);
13097}
13098
13099STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k0_out, name, "aten::special_modified_bessel_k0")
13100STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k0_out, overload_name, "out")
13101STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k0_out, schema_str, "special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
13102
13103// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13104static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_k0_out::schema> create_special_modified_bessel_k0_out_typed_handle() {
13105 return c10::Dispatcher::singleton()
13106 .findSchemaOrThrow(special_modified_bessel_k0_out::name, special_modified_bessel_k0_out::overload_name)
13107 .typed<special_modified_bessel_k0_out::schema>();
13108}
13109
13110// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13111at::Tensor & special_modified_bessel_k0_out::call(const at::Tensor & self, at::Tensor & out) {
13112
13113 static auto op = create_special_modified_bessel_k0_out_typed_handle();
13114 return op.call(self, out);
13115}
13116
13117// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13118at::Tensor & special_modified_bessel_k0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
13119
13120 static auto op = create_special_modified_bessel_k0_out_typed_handle();
13121 return op.redispatch(dispatchKeySet, self, out);
13122}
13123
13124STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k1, name, "aten::special_modified_bessel_k1")
13125STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k1, overload_name, "")
13126STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k1, schema_str, "special_modified_bessel_k1(Tensor self) -> Tensor")
13127
13128// aten::special_modified_bessel_k1(Tensor self) -> Tensor
13129static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_k1::schema> create_special_modified_bessel_k1_typed_handle() {
13130 return c10::Dispatcher::singleton()
13131 .findSchemaOrThrow(special_modified_bessel_k1::name, special_modified_bessel_k1::overload_name)
13132 .typed<special_modified_bessel_k1::schema>();
13133}
13134
13135// aten::special_modified_bessel_k1(Tensor self) -> Tensor
13136at::Tensor special_modified_bessel_k1::call(const at::Tensor & self) {
13137
13138 static auto op = create_special_modified_bessel_k1_typed_handle();
13139 return op.call(self);
13140}
13141
13142// aten::special_modified_bessel_k1(Tensor self) -> Tensor
13143at::Tensor special_modified_bessel_k1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
13144
13145 static auto op = create_special_modified_bessel_k1_typed_handle();
13146 return op.redispatch(dispatchKeySet, self);
13147}
13148
13149STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k1_out, name, "aten::special_modified_bessel_k1")
13150STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k1_out, overload_name, "out")
13151STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_modified_bessel_k1_out, schema_str, "special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
13152
13153// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13154static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_k1_out::schema> create_special_modified_bessel_k1_out_typed_handle() {
13155 return c10::Dispatcher::singleton()
13156 .findSchemaOrThrow(special_modified_bessel_k1_out::name, special_modified_bessel_k1_out::overload_name)
13157 .typed<special_modified_bessel_k1_out::schema>();
13158}
13159
13160// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13161at::Tensor & special_modified_bessel_k1_out::call(const at::Tensor & self, at::Tensor & out) {
13162
13163 static auto op = create_special_modified_bessel_k1_out_typed_handle();
13164 return op.call(self, out);
13165}
13166
13167// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13168at::Tensor & special_modified_bessel_k1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
13169
13170 static auto op = create_special_modified_bessel_k1_out_typed_handle();
13171 return op.redispatch(dispatchKeySet, self, out);
13172}
13173
13174STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t, name, "aten::special_shifted_chebyshev_polynomial_t")
13175STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t, overload_name, "")
13176STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t, schema_str, "special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor")
13177
13178// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
13179static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t::schema> create_special_shifted_chebyshev_polynomial_t_typed_handle() {
13180 return c10::Dispatcher::singleton()
13181 .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t::name, special_shifted_chebyshev_polynomial_t::overload_name)
13182 .typed<special_shifted_chebyshev_polynomial_t::schema>();
13183}
13184
13185// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
13186at::Tensor special_shifted_chebyshev_polynomial_t::call(const at::Tensor & x, const at::Tensor & n) {
13187
13188 static auto op = create_special_shifted_chebyshev_polynomial_t_typed_handle();
13189 return op.call(x, n);
13190}
13191
13192// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
13193at::Tensor special_shifted_chebyshev_polynomial_t::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
13194
13195 static auto op = create_special_shifted_chebyshev_polynomial_t_typed_handle();
13196 return op.redispatch(dispatchKeySet, x, n);
13197}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_x_scalar, name, "aten::special_shifted_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_x_scalar, overload_name, "x_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_x_scalar, schema_str, "special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor")

// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_x_scalar::schema> create_special_shifted_chebyshev_polynomial_t_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_x_scalar::name, special_shifted_chebyshev_polynomial_t_x_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_x_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_n_scalar, name, "aten::special_shifted_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_n_scalar, overload_name, "n_scalar")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_n_scalar, schema_str, "special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor")

// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_n_scalar::schema> create_special_shifted_chebyshev_polynomial_t_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_n_scalar::name, special_shifted_chebyshev_polynomial_t_n_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_n_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_out, name, "aten::special_shifted_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_out, schema_str, "special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_out::schema> create_special_shifted_chebyshev_polynomial_t_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_out::name, special_shifted_chebyshev_polynomial_t_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_x_scalar_out, name, "aten::special_shifted_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_x_scalar_out, overload_name, "x_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_x_scalar_out, schema_str, "special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_x_scalar_out::schema> create_special_shifted_chebyshev_polynomial_t_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_x_scalar_out::name, special_shifted_chebyshev_polynomial_t_x_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_x_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_n_scalar_out, name, "aten::special_shifted_chebyshev_polynomial_t")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_n_scalar_out, overload_name, "n_scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(special_shifted_chebyshev_polynomial_t_n_scalar_out, schema_str, "special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")

// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_n_scalar_out::schema> create_special_shifted_chebyshev_polynomial_t_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_n_scalar_out::name, special_shifted_chebyshev_polynomial_t_n_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_n_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_shifted_chebyshev_polynomial_t_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}
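
// Illustrative sketch of the out-variant convention used by the overloads
// above: the schema places `out` after `*` (keyword-only in Python), the C++
// `call` takes it as the trailing parameter, and the `Tensor(a!)` annotation
// records that `out` is mutated and aliased to the return value. Assuming the
// standard generated at:: wrappers:
//
//   at::Tensor out = at::empty({8});
//   at::special_shifted_chebyshev_polynomial_t_outf(x, n, out);  // out last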

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw_, name, "aten::_fused_adamw_")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw_, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw_, schema_str, "_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()")

// aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw_::schema> create__fused_adamw__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adamw_::name, _fused_adamw_::overload_name)
      .typed<_fused_adamw_::schema>();
}

// aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adamw_::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {

    static auto op = create__fused_adamw__typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adamw_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {

    static auto op = create__fused_adamw__typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
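
// Illustrative sketch (assuming equal-length TensorLists on one device).
// `_fused_adamw_` is an in-place, foreach-style AdamW step; every TensorList
// except `state_steps` is mutated, which is why the schema marks them with
// (a!)..(e!). The optional `grad_scale`/`found_inf` tensors are only consumed
// under AMP and default to nullopt:
//
//   at::_fused_adamw_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs,
//                     state_steps, /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999,
//                     /*weight_decay=*/1e-2, /*eps=*/1e-8, /*amsgrad=*/false,
//                     /*maximize=*/false);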

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_flatten_weight_out, name, "aten::_cudnn_rnn_flatten_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_flatten_weight_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cudnn_rnn_flatten_weight_out, schema_str, "_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_flatten_weight_out::schema> create__cudnn_rnn_flatten_weight_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn_flatten_weight_out::name, _cudnn_rnn_flatten_weight_out::overload_name)
      .typed<_cudnn_rnn_flatten_weight_out::schema>();
}

// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cudnn_rnn_flatten_weight_out::call(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {

    static auto op = create__cudnn_rnn_flatten_weight_out_typed_handle();
    return op.call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}

// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cudnn_rnn_flatten_weight_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {

    static auto op = create__cudnn_rnn_flatten_weight_out_typed_handle();
    return op.redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_batch_norm_out, name, "aten::quantized_batch_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_batch_norm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_batch_norm_out, schema_str, "quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)")

// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<quantized_batch_norm_out::schema> create_quantized_batch_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_batch_norm_out::name, quantized_batch_norm_out::overload_name)
      .typed<quantized_batch_norm_out::schema>();
}

// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantized_batch_norm_out::call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) {

    static auto op = create_quantized_batch_norm_out_typed_handle();
    return op.call(input, weight, bias, mean, var, eps, output_scale, output_zero_point, out);
}

// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantized_batch_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) {

    static auto op = create_quantized_batch_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc_out, name, "aten::conv_tbc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(conv_tbc_out, schema_str, "conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<conv_tbc_out::schema> create_conv_tbc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv_tbc_out::name, conv_tbc_out::overload_name)
      .typed<conv_tbc_out::schema>();
}

// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & conv_tbc_out::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {

    static auto op = create_conv_tbc_out_typed_handle();
    return op.call(self, weight, bias, pad, out);
}

// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & conv_tbc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {

    static auto op = create_conv_tbc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, pad, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_backward_out, name, "aten::cudnn_affine_grid_generator_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_affine_grid_generator_backward_out, schema_str, "cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_affine_grid_generator_backward_out::schema> create_cudnn_affine_grid_generator_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_affine_grid_generator_backward_out::name, cudnn_affine_grid_generator_backward_out::overload_name)
      .typed<cudnn_affine_grid_generator_backward_out::schema>();
}

// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_affine_grid_generator_backward_out::call(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {

    static auto op = create_cudnn_affine_grid_generator_backward_out_typed_handle();
    return op.call(grad, N, C, H, W, out);
}

// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_affine_grid_generator_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {

    static auto op = create_cudnn_affine_grid_generator_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, N, C, H, W, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_out, name, "aten::cudnn_grid_sampler")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cudnn_grid_sampler_out, schema_str, "cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)")

// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_grid_sampler_out::schema> create_cudnn_grid_sampler_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_grid_sampler_out::name, cudnn_grid_sampler_out::overload_name)
      .typed<cudnn_grid_sampler_out::schema>();
}

// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_grid_sampler_out::call(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {

    static auto op = create_cudnn_grid_sampler_out_typed_handle();
    return op.call(self, grid, out);
}

// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_grid_sampler_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {

    static auto op = create_cudnn_grid_sampler_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grid, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_out, name, "aten::div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_out, schema_str, "div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")

// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div_Scalar_out::schema> create_div_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(div_Scalar_out::name, div_Scalar_out::overload_name)
      .typed<div_Scalar_out::schema>();
}

// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_div_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {

    static auto op = create_div_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_mode_out, name, "aten::div")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_mode_out, overload_name, "Scalar_mode_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(div_Scalar_mode_out, schema_str, "div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)")

// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div_Scalar_mode_out::schema> create_div_Scalar_mode_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(div_Scalar_mode_out::name, div_Scalar_mode_out::overload_name)
      .typed<div_Scalar_mode_out::schema>();
}

// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_Scalar_mode_out::call(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {

    static auto op = create_div_Scalar_mode_out_typed_handle();
    return op.call(self, other, rounding_mode, out);
}

// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_Scalar_mode_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {

    static auto op = create_div_Scalar_mode_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode, out);
}
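
// Illustrative sketch, assuming the generated at::div_outf wrapper for the
// Scalar_mode_out overload above. `rounding_mode` may be c10::nullopt (true
// division), "trunc" (round toward zero), or "floor":
//
//   at::Tensor out = at::empty_like(self);
//   at::div_outf(self, at::Scalar(3), /*rounding_mode=*/"floor", out);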

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_forward_only_out, name, "aten::_embedding_bag_forward_only")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_forward_only_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_embedding_bag_forward_only_out, schema_str, "_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))")

// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_forward_only_out::schema> create__embedding_bag_forward_only_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_forward_only_out::name, _embedding_bag_forward_only_out::overload_name)
      .typed<_embedding_bag_forward_only_out::schema>();
}

// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {

    static auto op = create__embedding_bag_forward_only_out_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}

// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {

    static auto op = create__embedding_bag_forward_only_out_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_zeros_out, name, "aten::new_zeros")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_zeros_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(new_zeros_out, schema_str, "new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")

// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<new_zeros_out::schema> create_new_zeros_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_zeros_out::name, new_zeros_out::overload_name)
      .typed<new_zeros_out::schema>();
}

// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_zeros_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create_new_zeros_out_typed_handle();
    return op.call(self, size, out);
}

// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_zeros_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {

    static auto op = create_new_zeros_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, out);
}
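
// Illustrative note: SymInt[] schema arguments surface here as
// c10::SymIntArrayRef; concrete integer sizes promote implicitly through the
// symint wrappers. A sketch, assuming the generated at::new_zeros_out wrapper
// (out-first argument order):
//
//   at::Tensor out = at::empty({0}, self.options());
//   at::new_zeros_out(out, self, {2, 3});  // fills out as a [2, 3] of zeros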

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback_out, name, "aten::_grid_sampler_2d_cpu_fallback")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_grid_sampler_2d_cpu_fallback_out, schema_str, "_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_grid_sampler_2d_cpu_fallback_out::schema> create__grid_sampler_2d_cpu_fallback_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_grid_sampler_2d_cpu_fallback_out::name, _grid_sampler_2d_cpu_fallback_out::overload_name)
      .typed<_grid_sampler_2d_cpu_fallback_out::schema>();
}

// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _grid_sampler_2d_cpu_fallback_out::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {

    static auto op = create__grid_sampler_2d_cpu_fallback_out_typed_handle();
    return op.call(input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _grid_sampler_2d_cpu_fallback_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {

    static auto op = create__grid_sampler_2d_cpu_fallback_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_out, name, "aten::grid_sampler_3d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(grid_sampler_3d_out, schema_str, "grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")

// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_3d_out::schema> create_grid_sampler_3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_3d_out::name, grid_sampler_3d_out::overload_name)
      .typed<grid_sampler_3d_out::schema>();
}

// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & grid_sampler_3d_out::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {

    static auto op = create_grid_sampler_3d_out_typed_handle();
    return op.call(input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & grid_sampler_3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {

    static auto op = create_grid_sampler_3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_out, name, "aten::hann_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_out, schema_str, "hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hann_window_out::schema> create_hann_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hann_window_out::name, hann_window_out::overload_name)
      .typed<hann_window_out::schema>();
}

// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hann_window_out::call(int64_t window_length, at::Tensor & out) {

    static auto op = create_hann_window_out_typed_handle();
    return op.call(window_length, out);
}

// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hann_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {

    static auto op = create_hann_window_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, out);
}
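
// For reference, the window written into `out` above is the Hann window
//
//   w[k] = 0.5 * (1 - cos(2*pi*k / (N - 1))),  k = 0, ..., N - 1,
//
// with N = window_length; the periodic overload below evaluates the same
// expression over N + 1 points and drops the last sample, the form
// conventionally used for STFT framing.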

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_periodic_out, name, "aten::hann_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_periodic_out, overload_name, "periodic_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hann_window_periodic_out, schema_str, "hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hann_window_periodic_out::schema> create_hann_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hann_window_periodic_out::name, hann_window_periodic_out::overload_name)
      .typed<hann_window_periodic_out::schema>();
}

// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hann_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {

    static auto op = create_hann_window_periodic_out_typed_handle();
    return op.call(window_length, periodic, out);
}

// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hann_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {

    static auto op = create_hann_window_periodic_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_out, name, "aten::hamming_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_out, schema_str, "hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_out::schema> create_hamming_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_out::name, hamming_window_out::overload_name)
      .typed<hamming_window_out::schema>();
}

// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_out::call(int64_t window_length, at::Tensor & out) {

    static auto op = create_hamming_window_out_typed_handle();
    return op.call(window_length, out);
}

// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {

    static auto op = create_hamming_window_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_out, name, "aten::hamming_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_out, overload_name, "periodic_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_out, schema_str, "hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_out::schema> create_hamming_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic_out::name, hamming_window_periodic_out::overload_name)
      .typed<hamming_window_periodic_out::schema>();
}

// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {

    static auto op = create_hamming_window_periodic_out_typed_handle();
    return op.call(window_length, periodic, out);
}

// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {

    static auto op = create_hamming_window_periodic_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_out, name, "aten::hamming_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_out, overload_name, "periodic_alpha_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_out, schema_str, "hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_alpha_out::schema> create_hamming_window_periodic_alpha_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic_alpha_out::name, hamming_window_periodic_alpha_out::overload_name)
      .typed<hamming_window_periodic_alpha_out::schema>();
}

// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_alpha_out::call(int64_t window_length, bool periodic, double alpha, at::Tensor & out) {

    static auto op = create_hamming_window_periodic_alpha_out_typed_handle();
    return op.call(window_length, periodic, alpha, out);
}

// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_alpha_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::Tensor & out) {

    static auto op = create_hamming_window_periodic_alpha_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_beta_out, name, "aten::hamming_window")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_beta_out, overload_name, "periodic_alpha_beta_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hamming_window_periodic_alpha_beta_out, schema_str, "hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_alpha_beta_out::schema> create_hamming_window_periodic_alpha_beta_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic_alpha_beta_out::name, hamming_window_periodic_alpha_beta_out::overload_name)
      .typed<hamming_window_periodic_alpha_beta_out::schema>();
}

// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_alpha_beta_out::call(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {

    static auto op = create_hamming_window_periodic_alpha_beta_out_typed_handle();
    return op.call(window_length, periodic, alpha, beta, out);
}

// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_alpha_beta_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {

    static auto op = create_hamming_window_periodic_alpha_beta_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, alpha, beta, out);
}
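
// For reference, the hamming_window overloads above compute the generalized
// Hamming family
//
//   w[k] = alpha - beta * cos(2*pi*k / (N - 1)),  k = 0, ..., N - 1,
//
// with alpha = 0.54 and beta = 0.46 when not supplied explicitly; as with
// hann_window, periodic=true evaluates over N + 1 points and drops the last
// sample.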

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_backward_out, name, "aten::native_group_norm_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_group_norm_backward_out, schema_str, "native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<native_group_norm_backward_out::schema> create_native_group_norm_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_group_norm_backward_out::name, native_group_norm_backward_out::overload_name)
      .typed<native_group_norm_backward_out::schema>();
}

// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_native_group_norm_backward_out_typed_handle();
    return op.call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}

// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_native_group_norm_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isnan_out, name, "aten::isnan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isnan_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(isnan_out, schema_str, "isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<isnan_out::schema> create_isnan_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isnan_out::name, isnan_out::overload_name)
      .typed<isnan_out::schema>();
}

// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isnan_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_isnan_out_typed_handle();
    return op.call(self, out);
}

// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isnan_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_isnan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_layer_norm_out, name, "aten::native_layer_norm")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_layer_norm_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_layer_norm_out, schema_str, "native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<native_layer_norm_out::schema> create_native_layer_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_layer_norm_out::name, native_layer_norm_out::overload_name)
      .typed<native_layer_norm_out::schema>();
}

// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out::call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_native_layer_norm_out_typed_handle();
    return op.call(input, normalized_shape, weight, bias, eps, out0, out1, out2);
}

// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_native_layer_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, out0, out1, out2);
}
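
// Illustrative sketch: multi-output out variants return a tuple of the same
// Tensor references that were passed in, so the result can be chained or
// ignored. Assuming the generated at::native_layer_norm_outf wrapper:
//
//   at::native_layer_norm_outf(input, normalized_shape, weight, bias,
//                              /*eps=*/1e-5, out0, out1, out2);
//   // returns {out0, out1, out2} as a tuple of references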

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_max_pool2d_out, name, "aten::_mps_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_max_pool2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_max_pool2d_out, schema_str, "_mps_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_mps_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_mps_max_pool2d_out::schema> create__mps_max_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mps_max_pool2d_out::name, _mps_max_pool2d_out::overload_name)
      .typed<_mps_max_pool2d_out::schema>();
}

// aten::_mps_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mps_max_pool2d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create__mps_max_pool2d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::_mps_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mps_max_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create__mps_max_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_out, name, "aten::mkldnn_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_max_pool2d_out, schema_str, "mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool2d_out::schema> create_mkldnn_max_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool2d_out::name, mkldnn_max_pool2d_out::overload_name)
      .typed<mkldnn_max_pool2d_out::schema>();
}

// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool2d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create_mkldnn_max_pool2d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create_mkldnn_max_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool2d_out, name, "aten::quantized_max_pool2d")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool2d_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantized_max_pool2d_out, schema_str, "quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)")

// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool2d_out::schema> create_quantized_max_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool2d_out::name, quantized_max_pool2d_out::overload_name)
      .typed<quantized_max_pool2d_out::schema>();
}

// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantized_max_pool2d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create_quantized_max_pool2d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantized_max_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {

    static auto op = create_quantized_max_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_out, name, "aten::_mps_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mps_convolution_out, schema_str, "_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_mps_convolution_out::schema> create__mps_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mps_convolution_out::name, _mps_convolution_out::overload_name)
      .typed<_mps_convolution_out::schema>();
}

// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mps_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {

    static auto op = create__mps_convolution_out_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups, out);
}

// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mps_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {

    static auto op = create__mps_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_rnn_layer_backward_out, name, "aten::mkldnn_rnn_layer_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_rnn_layer_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_rnn_layer_backward_out, schema_str, "mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))")

// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_rnn_layer_backward_out::schema> create_mkldnn_rnn_layer_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_rnn_layer_backward_out::name, mkldnn_rnn_layer_backward_out::overload_name)
      .typed<mkldnn_rnn_layer_backward_out::schema>();
}

// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
13961::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out::call(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) {
13962
13963 static auto op = create_mkldnn_rnn_layer_backward_out_typed_handle();
13964 return op.call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
13965}
13966
13967// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
13968::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) {
13969
13970 static auto op = create_mkldnn_rnn_layer_backward_out_typed_handle();
13971 return op.redispatch(dispatchKeySet, input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
13972}
13973
13974STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_depthwise_convolution_out, name, "aten::miopen_depthwise_convolution")
13975STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_depthwise_convolution_out, overload_name, "out")
13976STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(miopen_depthwise_convolution_out, schema_str, "miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)")
13977
13978// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
13979static C10_NOINLINE c10::TypedOperatorHandle<miopen_depthwise_convolution_out::schema> create_miopen_depthwise_convolution_out_typed_handle() {
13980 return c10::Dispatcher::singleton()
13981 .findSchemaOrThrow(miopen_depthwise_convolution_out::name, miopen_depthwise_convolution_out::overload_name)
13982 .typed<miopen_depthwise_convolution_out::schema>();
13983}
13984
13985// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
13986at::Tensor & miopen_depthwise_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
13987
13988 static auto op = create_miopen_depthwise_convolution_out_typed_handle();
13989 return op.call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
13990}
13991
13992// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
13993at::Tensor & miopen_depthwise_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
13994
13995 static auto op = create_miopen_depthwise_convolution_out_typed_handle();
13996 return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
13997}
13998
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_stats_out, name, "aten::batch_norm_stats")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_stats_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_stats_out, schema_str, "batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_stats_out::schema> create_batch_norm_stats_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_stats_out::name, batch_norm_stats_out::overload_name)
      .typed<batch_norm_stats_out::schema>();
}

// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out::call(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_batch_norm_stats_out_typed_handle();
    return op.call(input, eps, out0, out1);
}

// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_batch_norm_stats_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, eps, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_out, name, "aten::batch_norm_gather_stats")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_gather_stats_out, schema_str, "batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_gather_stats_out::schema> create_batch_norm_gather_stats_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_gather_stats_out::name, batch_norm_gather_stats_out::overload_name)
      .typed<batch_norm_gather_stats_out::schema>();
}

// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out::call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_batch_norm_gather_stats_out_typed_handle();
    return op.call(input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
}

// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {

    static auto op = create_batch_norm_gather_stats_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_backward_out, name, "aten::native_batch_norm_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(native_batch_norm_backward_out, schema_str, "native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<native_batch_norm_backward_out::schema> create_native_batch_norm_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_batch_norm_backward_out::name, native_batch_norm_backward_out::overload_name)
      .typed<native_batch_norm_backward_out::schema>();
}

// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out::call(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_native_batch_norm_backward_out_typed_handle();
    return op.call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
}

// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_native_batch_norm_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_reduce_out, name, "aten::batch_norm_backward_reduce")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_reduce_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(batch_norm_backward_reduce_out, schema_str, "batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))")

// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward_reduce_out::schema> create_batch_norm_backward_reduce_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_backward_reduce_out::name, batch_norm_backward_reduce_out::overload_name)
      .typed<batch_norm_backward_reduce_out::schema>();
}

// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {

    static auto op = create_batch_norm_backward_reduce_out_typed_handle();
    return op.call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
}

// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {

    static auto op = create_batch_norm_backward_reduce_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnpack_spatial_convolution_out, name, "aten::_nnpack_spatial_convolution")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnpack_spatial_convolution_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nnpack_spatial_convolution_out, schema_str, "_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_nnpack_spatial_convolution_out::schema> create__nnpack_spatial_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nnpack_spatial_convolution_out::name, _nnpack_spatial_convolution_out::overload_name)
      .typed<_nnpack_spatial_convolution_out::schema>();
}

// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nnpack_spatial_convolution_out::call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {

    static auto op = create__nnpack_spatial_convolution_out_typed_handle();
    return op.call(input, weight, bias, padding, stride, out);
}

// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nnpack_spatial_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {

    static auto op = create__nnpack_spatial_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, padding, stride, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_names_out, name, "aten::ones")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_names_out, overload_name, "names_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(ones_names_out, schema_str, "ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)")

// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ones_names_out::schema> create_ones_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones_names_out::name, ones_names_out::overload_name)
      .typed<ones_names_out::schema>();
}

// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_names_out::call(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {

    static auto op = create_ones_names_out_typed_handle();
    return op.call(size, names, out);
}

// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {

    static auto op = create_ones_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cdist_forward_out, name, "aten::_cdist_forward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cdist_forward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_cdist_forward_out, schema_str, "_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_cdist_forward_out::schema> create__cdist_forward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cdist_forward_out::name, _cdist_forward_out::overload_name)
      .typed<_cdist_forward_out::schema>();
}

// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cdist_forward_out::call(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out) {

    static auto op = create__cdist_forward_out_typed_handle();
    return op.call(x1, x2, p, compute_mode, out);
}

// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cdist_forward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out) {

    static auto op = create__cdist_forward_out_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, p, compute_mode, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rand_like_out, name, "aten::rand_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rand_like_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(rand_like_out, schema_str, "rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rand_like_out::schema> create_rand_like_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_like_out::name, rand_like_out::overload_name)
      .typed<rand_like_out::schema>();
}

// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_like_out::call(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_rand_like_out_typed_handle();
    return op.call(self, memory_format, out);
}

// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_rand_like_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_out, name, "aten::randint_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_out, schema_str, "randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randint_like_out::schema> create_randint_like_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_like_out::name, randint_like_out::overload_name)
      .typed<randint_like_out::schema>();
}

// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_like_out::call(const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_randint_like_out_typed_handle();
    return op.call(self, high, memory_format, out);
}

// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_randint_like_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, high, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_low_dtype_out, name, "aten::randint_like")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_low_dtype_out, overload_name, "low_dtype_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(randint_like_low_dtype_out, schema_str, "randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randint_like_low_dtype_out::schema> create_randint_like_low_dtype_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_like_low_dtype_out::name, randint_like_low_dtype_out::overload_name)
      .typed<randint_like_low_dtype_out::schema>();
}

// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_like_low_dtype_out::call(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_randint_like_low_dtype_out_typed_handle();
    return op.call(self, low, high, memory_format, out);
}

// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_like_low_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_randint_like_low_dtype_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, low, high, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_backward_out, name, "aten::select_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(select_backward_out, schema_str, "select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)")

// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<select_backward_out::schema> create_select_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_backward_out::name, select_backward_out::overload_name)
      .typed<select_backward_out::schema>();
}

// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_backward_out::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {

    static auto op = create_select_backward_out_typed_handle();
    return op.call(grad_output, input_sizes, dim, index, out);
}

// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {

    static auto op = create_select_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input_sizes, dim, index, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_scatter_out, name, "aten::slice_scatter")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_scatter_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(slice_scatter_out, schema_str, "slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slice_scatter_out::schema> create_slice_scatter_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_scatter_out::name, slice_scatter_out::overload_name)
      .typed<slice_scatter_out::schema>();
}

// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_scatter_out::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {

    static auto op = create_slice_scatter_out_typed_handle();
    return op.call(self, src, dim, start, end, step, out);
}

// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {

    static auto op = create_slice_scatter_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, dim, start, end, step, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose_out, name, "aten::_mkldnn_transpose")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_mkldnn_transpose_out, schema_str, "_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_transpose_out::schema> create__mkldnn_transpose_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mkldnn_transpose_out::name, _mkldnn_transpose_out::overload_name)
      .typed<_mkldnn_transpose_out::schema>();
}

// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mkldnn_transpose_out::call(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {

    static auto op = create__mkldnn_transpose_out_typed_handle();
    return op.call(self, dim0, dim1, out);
}

// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mkldnn_transpose_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {

    static auto op = create__mkldnn_transpose_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim0, dim1, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_and_nested_example_out, name, "aten::_nested_from_padded_and_nested_example")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_and_nested_example_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_nested_from_padded_and_nested_example_out, schema_str, "_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded_and_nested_example_out::schema> create__nested_from_padded_and_nested_example_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_from_padded_and_nested_example_out::name, _nested_from_padded_and_nested_example_out::overload_name)
      .typed<_nested_from_padded_and_nested_example_out::schema>();
}

// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_from_padded_and_nested_example_out::call(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {

    static auto op = create__nested_from_padded_and_nested_example_out_typed_handle();
    return op.call(padded, nt_example, out);
}

// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_from_padded_and_nested_example_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {

    static auto op = create__nested_from_padded_and_nested_example_out_typed_handle();
    return op.redispatch(dispatchKeySet, padded, nt_example, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_out, name, "aten::unique_dim")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_dim_out, schema_str, "unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<unique_dim_out::schema> create_unique_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unique_dim_out::name, unique_dim_out::overload_name)
      .typed<unique_dim_out::schema>();
}

// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out::call(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_unique_dim_out_typed_handle();
    return op.call(self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
}

// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_unique_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_consecutive_out, name, "aten::unique_consecutive")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_consecutive_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unique_consecutive_out, schema_str, "unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<unique_consecutive_out::schema> create_unique_consecutive_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unique_consecutive_out::name, unique_consecutive_out::overload_name)
      .typed<unique_consecutive_out::schema>();
}

// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out::call(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_unique_consecutive_out_typed_handle();
    return op.call(self, return_inverse, return_counts, dim, out0, out1, out2);
}

// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create_unique_consecutive_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, return_inverse, return_counts, dim, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dirichlet_grad_out, name, "aten::_dirichlet_grad")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dirichlet_grad_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_dirichlet_grad_out, schema_str, "_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_dirichlet_grad_out::schema> create__dirichlet_grad_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dirichlet_grad_out::name, _dirichlet_grad_out::overload_name)
      .typed<_dirichlet_grad_out::schema>();
}

// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _dirichlet_grad_out::call(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) {

    static auto op = create__dirichlet_grad_out_typed_handle();
    return op.call(x, alpha, total, out);
}

// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _dirichlet_grad_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) {

    static auto op = create__dirichlet_grad_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, alpha, total, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clone_out, name, "aten::clone")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clone_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(clone_out, schema_str, "clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<clone_out::schema> create_clone_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clone_out::name, clone_out::overload_name)
      .typed<clone_out::schema>();
}

// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clone_out::call(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_clone_out_typed_handle();
    return op.call(self, memory_format, out);
}

// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clone_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {

    static auto op = create_clone_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_out, name, "aten::resize_as")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_out, schema_str, "resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")

// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_out::schema> create_resize_as_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as_out::name, resize_as_out::overload_name)
      .typed<resize_as_out::schema>();
}

// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & resize_as_out::call(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {

    static auto op = create_resize_as_out_typed_handle();
    return op.call(self, the_template, memory_format, out);
}

// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & resize_as_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {

    static auto op = create_resize_as_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template, memory_format, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as, name, "aten::resize_as")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as, schema_str, "resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor")

// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<resize_as::schema> create_resize_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as::name, resize_as::overload_name)
      .typed<resize_as::schema>();
}

// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor resize_as::call(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_resize_as_typed_handle();
    return op.call(self, the_template, memory_format);
}

// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor resize_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {

    static auto op = create_resize_as_typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template, memory_format);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse_out, name, "aten::resize_as_sparse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse_out, schema_str, "resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)")

// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_sparse_out::schema> create_resize_as_sparse_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as_sparse_out::name, resize_as_sparse_out::overload_name)
      .typed<resize_as_sparse_out::schema>();
}

// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & resize_as_sparse_out::call(const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) {

    static auto op = create_resize_as_sparse_out_typed_handle();
    return op.call(self, the_template, out);
}

// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & resize_as_sparse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) {

    static auto op = create_resize_as_sparse_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse, name, "aten::resize_as_sparse")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(resize_as_sparse, schema_str, "resize_as_sparse(Tensor self, Tensor the_template) -> Tensor")

// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_sparse::schema> create_resize_as_sparse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as_sparse::name, resize_as_sparse::overload_name)
      .typed<resize_as_sparse::schema>();
}

// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor
at::Tensor resize_as_sparse::call(const at::Tensor & self, const at::Tensor & the_template) {

    static auto op = create_resize_as_sparse_typed_handle();
    return op.call(self, the_template);
}

// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor
at::Tensor resize_as_sparse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) {

    static auto op = create_resize_as_sparse_typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csc_out, name, "aten::to_sparse_csc")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csc_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(to_sparse_csc_out, schema_str, "to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_csc_out::schema> create_to_sparse_csc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_csc_out::name, to_sparse_csc_out::overload_name)
      .typed<to_sparse_csc_out::schema>();
}

// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_csc_out::call(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {

    static auto op = create_to_sparse_csc_out_typed_handle();
    return op.call(self, dense_dim, out);
}

// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_sparse_csc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {

    static auto op = create_to_sparse_csc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv2d_weight_out, name, "aten::mkldnn_reorder_conv2d_weight")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv2d_weight_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(mkldnn_reorder_conv2d_weight_out, schema_str, "mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_reorder_conv2d_weight_out::schema> create_mkldnn_reorder_conv2d_weight_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_reorder_conv2d_weight_out::name, mkldnn_reorder_conv2d_weight_out::overload_name)
      .typed<mkldnn_reorder_conv2d_weight_out::schema>();
}

// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_reorder_conv2d_weight_out::call(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) {

    static auto op = create_mkldnn_reorder_conv2d_weight_out_typed_handle();
    return op.call(self, padding, stride, dilation, groups, input_size, out);
}

// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_reorder_conv2d_weight_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) {

    static auto op = create_mkldnn_reorder_conv2d_weight_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size, out);
}

14599STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantize_per_channel_out, name, "aten::quantize_per_channel")
14600STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantize_per_channel_out, overload_name, "out")
14601STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(quantize_per_channel_out, schema_str, "quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)")
14602
14603// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
14604static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_channel_out::schema> create_quantize_per_channel_out_typed_handle() {
14605 return c10::Dispatcher::singleton()
14606 .findSchemaOrThrow(quantize_per_channel_out::name, quantize_per_channel_out::overload_name)
14607 .typed<quantize_per_channel_out::schema>();
14608}
14609
14610// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
14611at::Tensor & quantize_per_channel_out::call(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) {
14612
14613 static auto op = create_quantize_per_channel_out_typed_handle();
14614 return op.call(self, scales, zero_points, axis, dtype, out);
14615}
14616
14617// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
14618at::Tensor & quantize_per_channel_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) {
14619
14620 static auto op = create_quantize_per_channel_out_typed_handle();
14621 return op.redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype, out);
14622}
14623
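// NOTE [editorial, not generated]: a hedged usage sketch for the out-variant
// above. Names `x`, `scales`, `zps`, `out` are illustrative; per-channel
// quantization expects one scale and one zero point per entry along `axis`:
//
//   at::Tensor x = at::randn({4, 2});
//   at::Tensor scales = at::tensor({0.1, 0.1});
//   at::Tensor zps = at::zeros({2}, at::kLong);
//   at::Tensor out = at::empty({0});
//   at::_ops::quantize_per_channel_out::call(x, scales, zps, /*axis=*/1,
//                                            at::kQInt8, out);
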
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_self_out, name, "aten::dequantize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_self_out, overload_name, "self_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_self_out, schema_str, "dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<dequantize_self_out::schema> create_dequantize_self_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dequantize_self_out::name, dequantize_self_out::overload_name)
      .typed<dequantize_self_out::schema>();
}

// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dequantize_self_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_dequantize_self_out_typed_handle();
    return op.call(self, out);
}

// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dequantize_self_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_dequantize_self_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_tensors_out, name, "aten::dequantize")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_tensors_out, overload_name, "tensors_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(dequantize_tensors_out, schema_str, "dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()")

// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<dequantize_tensors_out::schema> create_dequantize_tensors_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dequantize_tensors_out::name, dequantize_tensors_out::overload_name)
      .typed<dequantize_tensors_out::schema>();
}

// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
void dequantize_tensors_out::call(at::TensorList tensors, at::TensorList out) {

    static auto op = create_dequantize_tensors_out_typed_handle();
    return op.call(tensors, out);
}

// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
void dequantize_tensors_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::TensorList out) {

    static auto op = create_dequantize_tensors_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

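// NOTE [editorial, not generated]: unlike the Tensor& out-variants,
// dequantize.tensors_out writes into a caller-provided at::TensorList and
// returns void, since schemas of the form "(... Tensor(a!)[] out) -> ()" have
// no single aliased output to hand back. The caller keeps ownership of the
// output vector, e.g. (illustrative; `inputs` is assumed to be a
// std::vector<at::Tensor> of quantized tensors):
//
//   std::vector<at::Tensor> outs(inputs.size());
//   for (auto & t : outs) t = at::empty({0});
//   at::_ops::dequantize_tensors_out::call(inputs, outs);
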
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_zero_points_out, name, "aten::q_per_channel_zero_points")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_zero_points_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(q_per_channel_zero_points_out, schema_str, "q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_zero_points_out::schema> create_q_per_channel_zero_points_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_zero_points_out::name, q_per_channel_zero_points_out::overload_name)
      .typed<q_per_channel_zero_points_out::schema>();
}

// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & q_per_channel_zero_points_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_q_per_channel_zero_points_out_typed_handle();
    return op.call(self, out);
}

// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & q_per_channel_zero_points_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_q_per_channel_zero_points_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine_out, name, "aten::_fake_quantize_learnable_per_channel_affine")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fake_quantize_learnable_per_channel_affine_out, schema_str, "_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_channel_affine_out::schema> create__fake_quantize_learnable_per_channel_affine_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fake_quantize_learnable_per_channel_affine_out::name, _fake_quantize_learnable_per_channel_affine_out::overload_name)
      .typed<_fake_quantize_learnable_per_channel_affine_out::schema>();
}

// aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fake_quantize_learnable_per_channel_affine_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {

    static auto op = create__fake_quantize_learnable_per_channel_affine_out_typed_handle();
    return op.call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
}

// aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fake_quantize_learnable_per_channel_affine_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {

    static auto op = create__fake_quantize_learnable_per_channel_affine_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_lstm_mps_out, name, "aten::_lstm_mps")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_lstm_mps_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_lstm_mps_out, schema_str, "_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))")

// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
static C10_NOINLINE c10::TypedOperatorHandle<_lstm_mps_out::schema> create__lstm_mps_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_lstm_mps_out::name, _lstm_mps_out::overload_name)
      .typed<_lstm_mps_out::schema>();
}

// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out::call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {

    static auto op = create__lstm_mps_out_typed_handle();
    return op.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4);
}

// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {

    static auto op = create__lstm_mps_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4);
}

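// NOTE [editorial, not generated]: operators with several mutable outputs,
// like _lstm_mps.out above, return a ::std::tuple of Tensor references that
// alias out0..out4 in order, so a caller can destructure the results without
// re-reading the argument list (illustrative sketch; the argument names match
// the schema, and the binding names for the trailing workspace outputs are
// made up here):
//
//   auto [y, hy, cy, ws0, ws1] = at::_ops::_lstm_mps_out::call(
//       input, hx, params, has_biases, num_layers, dropout,
//       train, bidirectional, batch_first, out0, out1, out2, out3, out4);
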
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_out, name, "aten::_thnn_fused_lstm_cell")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_thnn_fused_lstm_cell_out, schema_str, "_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")

// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell_out::schema> create__thnn_fused_lstm_cell_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_thnn_fused_lstm_cell_out::name, _thnn_fused_lstm_cell_out::overload_name)
      .typed<_thnn_fused_lstm_cell_out::schema>();
}

// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out::call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__thnn_fused_lstm_cell_out_typed_handle();
    return op.call(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
}

// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {

    static auto op = create__thnn_fused_lstm_cell_out_typed_handle();
    return op.redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh_copy_out, name, "aten::lift_fresh_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(lift_fresh_copy_out, schema_str, "lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lift_fresh_copy_out::schema> create_lift_fresh_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lift_fresh_copy_out::name, lift_fresh_copy_out::overload_name)
      .typed<lift_fresh_copy_out::schema>();
}

// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lift_fresh_copy_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_lift_fresh_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lift_fresh_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_lift_fresh_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Scalar_out, name, "aten::index_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Scalar_out, overload_name, "int_Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Scalar_out, schema_str, "index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)")

// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_int_Scalar_out::schema> create_index_fill_int_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill_int_Scalar_out::name, index_fill_int_Scalar_out::overload_name)
      .typed<index_fill_int_Scalar_out::schema>();
}

// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_fill_int_Scalar_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {

    static auto op = create_index_fill_int_Scalar_out_typed_handle();
    return op.call(self, dim, index, value, out);
}

// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_fill_int_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {

    static auto op = create_index_fill_int_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Tensor_out, name, "aten::index_fill")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Tensor_out, overload_name, "int_Tensor_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(index_fill_int_Tensor_out, schema_str, "index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)")

// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_int_Tensor_out::schema> create_index_fill_int_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_fill_int_Tensor_out::name, index_fill_int_Tensor_out::overload_name)
      .typed<index_fill_int_Tensor_out::schema>();
}

// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_fill_int_Tensor_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {

    static auto op = create_index_fill_int_Tensor_out_typed_handle();
    return op.call(self, dim, index, value, out);
}

// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_fill_int_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {

    static auto op = create_index_fill_int_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, out);
}

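// NOTE [editorial, not generated]: the two index_fill wrappers above differ
// only in whether `value` is a Scalar or a 0-dim Tensor; the overload names
// "int_Scalar_out" / "int_Tensor_out" encode that distinction so both can
// coexist under the single schema name "aten::index_fill". Illustrative
// (names `self`, `index`, `out` assumed):
//
//   at::_ops::index_fill_int_Scalar_out::call(self, /*dim=*/0, index, 1.0, out);
//   at::_ops::index_fill_int_Tensor_out::call(self, /*dim=*/0, index,
//                                             at::scalar_tensor(1.0), out);
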
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_from_out, name, "aten::random")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_from_out, overload_name, "from_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_from_out, schema_str, "random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<random_from_out::schema> create_random_from_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_from_out::name, random_from_out::overload_name)
      .typed<random_from_out::schema>();
}

// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_from_out::call(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_random_from_out_typed_handle();
    return op.call(self, from, to, generator, out);
}

// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_from_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_random_from_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_from, name, "aten::random")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_from, overload_name, "from")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_from, schema_str, "random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor")

// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<random_from::schema> create_random_from_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_from::name, random_from::overload_name)
      .typed<random_from::schema>();
}

// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
at::Tensor random_from::call(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {

    static auto op = create_random_from_typed_handle();
    return op.call(self, from, to, generator);
}

// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
at::Tensor random_from::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {

    static auto op = create_random_from_typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_to_out, name, "aten::random")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_to_out, overload_name, "to_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_to_out, schema_str, "random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<random_to_out::schema> create_random_to_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_to_out::name, random_to_out::overload_name)
      .typed<random_to_out::schema>();
}

// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_to_out::call(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_random_to_out_typed_handle();
    return op.call(self, to, generator, out);
}

// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_to_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_random_to_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, to, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_to, name, "aten::random")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_to, overload_name, "to")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_to, schema_str, "random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor")

// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<random_to::schema> create_random_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_to::name, random_to::overload_name)
      .typed<random_to::schema>();
}

// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
at::Tensor random_to::call(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {

    static auto op = create_random_to_typed_handle();
    return op.call(self, to, generator);
}

// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
at::Tensor random_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {

    static auto op = create_random_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, to, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_out, name, "aten::random")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random_out, schema_str, "random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<random_out::schema> create_random_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_out::name, random_out::overload_name)
      .typed<random_out::schema>();
}

// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_out::call(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_random_out_typed_handle();
    return op.call(self, generator, out);
}

// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_random_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random, name, "aten::random")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(random, schema_str, "random(Tensor self, *, Generator? generator=None) -> Tensor")

// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<random::schema> create_random_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random::name, random::overload_name)
      .typed<random::schema>();
}

// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
at::Tensor random::call(const at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create_random_typed_handle();
    return op.call(self, generator);
}

// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
at::Tensor random::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator) {

    static auto op = create_random_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator);
}

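// NOTE [editorial, not generated]: random.from / random.to / random are
// functional (non-mutating) counterparts of the in-place Tensor::random_
// overloads; torchgen emits them, together with the *_out forms above, so
// functionalization passes can replace mutation with value semantics.
// Illustrative sketch, assuming an integral CPU tensor `t`:
//
//   at::Tensor t = at::zeros({8}, at::kLong);
//   // samples uniformly from [from, to); returns a new tensor, t untouched
//   at::Tensor r = at::_ops::random_from::call(t, /*from=*/0, /*to=*/10,
//                                              /*generator=*/c10::nullopt);
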
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy_out, name, "aten::cauchy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy_out, schema_str, "cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cauchy_out::schema> create_cauchy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cauchy_out::name, cauchy_out::overload_name)
      .typed<cauchy_out::schema>();
}

// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cauchy_out::call(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_cauchy_out_typed_handle();
    return op.call(self, median, sigma, generator, out);
}

// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cauchy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_cauchy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, median, sigma, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy, name, "aten::cauchy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cauchy, schema_str, "cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor")

// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cauchy::schema> create_cauchy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cauchy::name, cauchy::overload_name)
      .typed<cauchy::schema>();
}

// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
at::Tensor cauchy::call(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {

    static auto op = create_cauchy_typed_handle();
    return op.call(self, median, sigma, generator);
}

// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
at::Tensor cauchy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {

    static auto op = create_cauchy_typed_handle();
    return op.redispatch(dispatchKeySet, self, median, sigma, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal_out, name, "aten::log_normal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal_out, schema_str, "log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log_normal_out::schema> create_log_normal_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_normal_out::name, log_normal_out::overload_name)
      .typed<log_normal_out::schema>();
}

// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_normal_out::call(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_log_normal_out_typed_handle();
    return op.call(self, mean, std, generator, out);
}

// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_normal_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_log_normal_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal, name, "aten::log_normal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(log_normal, schema_str, "log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor")

// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log_normal::schema> create_log_normal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_normal::name, log_normal::overload_name)
      .typed<log_normal::schema>();
}

// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor
at::Tensor log_normal::call(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_log_normal_typed_handle();
    return op.call(self, mean, std, generator);
}

// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor
at::Tensor log_normal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {

    static auto op = create_log_normal_typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_bin_edges_out, name, "aten::_histogramdd_bin_edges")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_bin_edges_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_bin_edges_out, schema_str, "_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()")

// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_bin_edges_out::schema> create__histogramdd_bin_edges_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_bin_edges_out::name, _histogramdd_bin_edges_out::overload_name)
      .typed<_histogramdd_bin_edges_out::schema>();
}

// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
void _histogramdd_bin_edges_out::call(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {

    static auto op = create__histogramdd_bin_edges_out_typed_handle();
    return op.call(self, bins, range, weight, density, out);
}

// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
void _histogramdd_bin_edges_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {

    static auto op = create__histogramdd_bin_edges_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density, out);
}

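// NOTE [editorial, not generated]: _histogramdd_bin_edges.out fills one edge
// tensor per histogram dimension, which is why `out` is a TensorList and the
// schema returns (). `bins` gives the bin count per dimension and `range`,
// when present, is understood to carry flattened (low, high) pairs per
// dimension, matching the functional operator it shadows. Illustrative sketch
// (`sample` is an assumed (N, 2) tensor):
//
//   std::vector<at::Tensor> edges(2);
//   for (auto & e : edges) e = at::empty({0});
//   at::_ops::_histogramdd_bin_edges_out::call(
//       sample, /*bins=*/{10, 10}, /*range=*/c10::nullopt,
//       /*weight=*/c10::nullopt, /*density=*/false, edges);
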
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_tensors_out, name, "aten::_histogramdd_from_bin_tensors")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_tensors_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_histogramdd_from_bin_tensors_out, schema_str, "_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)")

// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_from_bin_tensors_out::schema> create__histogramdd_from_bin_tensors_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_from_bin_tensors_out::name, _histogramdd_from_bin_tensors_out::overload_name)
      .typed<_histogramdd_from_bin_tensors_out::schema>();
}

// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _histogramdd_from_bin_tensors_out::call(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {

    static auto op = create__histogramdd_from_bin_tensors_out_typed_handle();
    return op.call(self, bins, weight, density, out);
}

// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _histogramdd_from_bin_tensors_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {

    static auto op = create__histogramdd_from_bin_tensors_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, weight, density, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_stable_out, name, "aten::argsort")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_stable_out, overload_name, "stable_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(argsort_stable_out, schema_str, "argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)")

// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<argsort_stable_out::schema> create_argsort_stable_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argsort_stable_out::name, argsort_stable_out::overload_name)
      .typed<argsort_stable_out::schema>();
}

// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argsort_stable_out::call(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {

    static auto op = create_argsort_stable_out_typed_handle();
    return op.call(self, stable, dim, descending, out);
}

// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argsort_stable_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {

    static auto op = create_argsort_stable_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold_backward_out, name, "aten::unfold_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(unfold_backward_out, schema_str, "unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)")

// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<unfold_backward_out::schema> create_unfold_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unfold_backward_out::name, unfold_backward_out::overload_name)
      .typed<unfold_backward_out::schema>();
}

// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unfold_backward_out::call(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {

    static auto op = create_unfold_backward_out_typed_handle();
    return op.call(grad_in, input_sizes, dim, size, step, out);
}

// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unfold_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {

    static auto op = create_unfold_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step, out);
}

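// NOTE [editorial, not generated]: unfold_backward.out takes the original
// input sizes as SymInt[] (c10::SymIntArrayRef), so the same wrapper serves
// both concrete shapes and symbolic ones under dynamic-shape tracing; with
// plain integers the sizes convert implicitly. Illustrative sketch
// (`grad_in` and `out` are assumed tensors consistent with the sizes):
//
//   std::vector<c10::SymInt> sizes = {8, 4};
//   at::_ops::unfold_backward_out::call(grad_in, sizes, /*dim=*/1,
//                                       /*size=*/2, /*step=*/1, out);
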
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_out, name, "aten::normal")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(normal_out, schema_str, "normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")

// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_out::schema> create_normal_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(normal_out::name, normal_out::overload_name)
      .typed<normal_out::schema>();
}

// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_out::call(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_normal_out_typed_handle();
    return op.call(self, mean, std, generator, out);
}

// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {

    static auto op = create_normal_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_Scalar_out, name, "aten::_foreach_sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_Scalar_out, schema_str, "_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_Scalar_out::schema> create__foreach_sub_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub_Scalar_out::name, _foreach_sub_Scalar_out::overload_name)
      .typed<_foreach_sub_Scalar_out::schema>();
}

// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_sub_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_sub_Scalar_out_typed_handle();
    return op.call(self, scalar, out);
}

// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_sub_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_sub_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_Scalar_out, name, "aten::_foreach_maximum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_Scalar_out, overload_name, "Scalar_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_Scalar_out, schema_str, "_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_Scalar_out::schema> create__foreach_maximum_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum_Scalar_out::name, _foreach_maximum_Scalar_out::overload_name)
      .typed<_foreach_maximum_Scalar_out::schema>();
}

// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_maximum_Scalar_out_typed_handle();
    return op.call(self, scalar, out);
}

// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {

    static auto op = create__foreach_maximum_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_List_out, name, "aten::_foreach_sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_List_out, schema_str, "_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()")

// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_List_out::schema> create__foreach_sub_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub_List_out::name, _foreach_sub_List_out::overload_name)
      .typed<_foreach_sub_List_out::schema>();
}

// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_sub_List_out::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {

    static auto op = create__foreach_sub_List_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_sub_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {

    static auto op = create__foreach_sub_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_List_out, name, "aten::_foreach_maximum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_List_out, overload_name, "List_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_List_out, schema_str, "_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_List_out::schema> create__foreach_maximum_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum_List_out::name, _foreach_maximum_List_out::overload_name)
      .typed<_foreach_maximum_List_out::schema>();
}

// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_maximum_List_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {

    static auto op = create__foreach_maximum_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

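// NOTE [editorial, not generated]: the _foreach_*.List_out wrappers pair the
// i-th tensor of `self` with the i-th tensor of `other` and write the i-th
// result into `out`; all three lists are expected to have the same length.
// Illustrative sketch (`a`, `b` are assumed std::vector<at::Tensor> of
// matching, same-shaped tensors):
//
//   std::vector<at::Tensor> outs;
//   outs.reserve(a.size());
//   for (const auto & t : a) outs.push_back(at::empty_like(t));
//   at::_ops::_foreach_maximum_List_out::call(a, b, outs);
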
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_ScalarList_out, name, "aten::_foreach_sub")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_sub_ScalarList_out, schema_str, "_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_ScalarList_out::schema> create__foreach_sub_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sub_ScalarList_out::name, _foreach_sub_ScalarList_out::overload_name)
      .typed<_foreach_sub_ScalarList_out::schema>();
}

// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_sub_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_sub_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_sub_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_sub_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_ScalarList_out, name, "aten::_foreach_maximum")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_ScalarList_out, overload_name, "ScalarList_out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_maximum_ScalarList_out, schema_str, "_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_ScalarList_out::schema> create__foreach_maximum_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum_ScalarList_out::name, _foreach_maximum_ScalarList_out::overload_name)
      .typed<_foreach_maximum_ScalarList_out::schema>();
}

// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_maximum_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {

    static auto op = create__foreach_maximum_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos_out, name, "aten::_foreach_acos")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_acos_out, schema_str, "_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_acos_out::schema> create__foreach_acos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_acos_out::name, _foreach_acos_out::overload_name)
      .typed<_foreach_acos_out::schema>();
}

// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_acos_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_acos_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_acos_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_acos_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan_out, name, "aten::_foreach_atan")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_atan_out, schema_str, "_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_atan_out::schema> create__foreach_atan_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_atan_out::name, _foreach_atan_out::overload_name)
      .typed<_foreach_atan_out::schema>();
}

// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_atan_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_atan_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_atan_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_atan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil_out, name, "aten::_foreach_ceil")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_ceil_out, schema_str, "_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_ceil_out::schema> create__foreach_ceil_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_ceil_out::name, _foreach_ceil_out::overload_name)
      .typed<_foreach_ceil_out::schema>();
}

// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_ceil_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_ceil_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_ceil_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_ceil_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf_out, name, "aten::_foreach_erf")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_erf_out, schema_str, "_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erf_out::schema> create__foreach_erf_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erf_out::name, _foreach_erf_out::overload_name)
      .typed<_foreach_erf_out::schema>();
}

// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_erf_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_erf_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_erf_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_erf_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2_out, name, "aten::_foreach_log2")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_foreach_log2_out, schema_str, "_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")

// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log2_out::schema> create__foreach_log2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log2_out::name, _foreach_log2_out::overload_name)
      .typed<_foreach_log2_out::schema>();
}

// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log2_out::call(at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_log2_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log2_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {

    static auto op = create__foreach_log2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

15499STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Scalar_out, name, "aten::bucketize")
15500STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Scalar_out, overload_name, "Scalar_out")
15501STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(bucketize_Scalar_out, schema_str, "bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)")
15502
15503// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
15504static C10_NOINLINE c10::TypedOperatorHandle<bucketize_Scalar_out::schema> create_bucketize_Scalar_out_typed_handle() {
15505 return c10::Dispatcher::singleton()
15506 .findSchemaOrThrow(bucketize_Scalar_out::name, bucketize_Scalar_out::overload_name)
15507 .typed<bucketize_Scalar_out::schema>();
15508}
15509
15510// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
15511at::Tensor & bucketize_Scalar_out::call(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
15512
15513 static auto op = create_bucketize_Scalar_out_typed_handle();
15514 return op.call(self, boundaries, out_int32, right, out);
15515}
15516
15517// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
15518at::Tensor & bucketize_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
15519
15520 static auto op = create_bucketize_Scalar_out_typed_handle();
15521 return op.redispatch(dispatchKeySet, self, boundaries, out_int32, right, out);
15522}
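
// NOTE: [Illustrative sketch, not generated] The Scalar_out overload above
// searches a sorted 1-D `boundaries` tensor for a single scalar. `right`
// picks which side of a tie wins, and `out_int32` picks int32 vs the default
// int64 result dtype. Under the schema defaults:
#if 0
at::Tensor boundaries = at::tensor({1.0, 3.0, 5.0});
at::Tensor out = at::empty({}, at::kLong);
// With right=False, the result is the first index whose boundary is >= the
// value, so for 2.0 `out` comes back holding index 1.
at::_ops::bucketize_Scalar_out::call(/*self=*/2.0, boundaries,
                                     /*out_int32=*/false, /*right=*/false, out);
#endif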

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_jvp_out, name, "aten::glu_backward_jvp")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_jvp_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(glu_backward_jvp_out, schema_str, "glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)")

// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<glu_backward_jvp_out::schema> create_glu_backward_jvp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_backward_jvp_out::name, glu_backward_jvp_out::overload_name)
      .typed<glu_backward_jvp_out::schema>();
}

// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_backward_jvp_out::call(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
    
    static auto op = create_glu_backward_jvp_out_typed_handle();
    return op.call(grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
}

// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_backward_jvp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
    
    static auto op = create_glu_backward_jvp_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardswish_backward_out, name, "aten::hardswish_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardswish_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(hardswish_backward_out, schema_str, "hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardswish_backward_out::schema> create_hardswish_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardswish_backward_out::name, hardswish_backward_out::overload_name)
      .typed<hardswish_backward_out::schema>();
}

// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardswish_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_hardswish_backward_out_typed_handle();
    return op.call(grad_output, self, out);
}

// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardswish_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_hardswish_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_backward_out, name, "aten::_adaptive_avg_pool3d_backward")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_backward_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_adaptive_avg_pool3d_backward_out, schema_str, "_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool3d_backward_out::schema> create__adaptive_avg_pool3d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool3d_backward_out::name, _adaptive_avg_pool3d_backward_out::overload_name)
      .typed<_adaptive_avg_pool3d_backward_out::schema>();
}

// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool3d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__adaptive_avg_pool3d_backward_out_typed_handle();
    return op.call(grad_output, self, out);
}

// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool3d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__adaptive_avg_pool3d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_copy_out, name, "aten::_conj_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_conj_copy_out, schema_str, "_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_conj_copy_out::schema> create__conj_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj_copy_out::name, _conj_copy_out::overload_name)
      .typed<_conj_copy_out::schema>();
}

// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _conj_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__conj_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _conj_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__conj_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_copy_out, name, "aten::detach_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(detach_copy_out, schema_str, "detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<detach_copy_out::schema> create_detach_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(detach_copy_out::name, detach_copy_out::overload_name)
      .typed<detach_copy_out::schema>();
}

// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & detach_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_detach_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & detach_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_detach_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices_copy_out, name, "aten::row_indices_copy")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices_copy_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(row_indices_copy_out, schema_str, "row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")

// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<row_indices_copy_out::schema> create_row_indices_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_indices_copy_out::name, row_indices_copy_out::overload_name)
      .typed<row_indices_copy_out::schema>();
}

// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & row_indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_row_indices_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & row_indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_row_indices_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_encoder_layer_fwd_out, name, "aten::_transformer_encoder_layer_fwd")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_encoder_layer_fwd_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_transformer_encoder_layer_fwd_out, schema_str, "_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_transformer_encoder_layer_fwd_out::schema> create__transformer_encoder_layer_fwd_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_transformer_encoder_layer_fwd_out::name, _transformer_encoder_layer_fwd_out::overload_name)
      .typed<_transformer_encoder_layer_fwd_out::schema>();
}

// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _transformer_encoder_layer_fwd_out::call(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out) {
    
    static auto op = create__transformer_encoder_layer_fwd_out_typed_handle();
    return op.call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
}

// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _transformer_encoder_layer_fwd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out) {
    
    static auto op = create__transformer_encoder_layer_fwd_out_typed_handle();
    return op.redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_native_multi_head_attention_out, name, "aten::_native_multi_head_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_native_multi_head_attention_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_native_multi_head_attention_out, schema_str, "_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")

// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_native_multi_head_attention_out::schema> create__native_multi_head_attention_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_native_multi_head_attention_out::name, _native_multi_head_attention_out::overload_name)
      .typed<_native_multi_head_attention_out::schema>();
}

// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create__native_multi_head_attention_out_typed_handle();
    return op.call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
}

// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create__native_multi_head_attention_out_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
}
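
// NOTE: [Illustrative sketch, not generated] Ops with multiple out= tensors
// return a tuple holding the same references that were passed in, so callers
// can unpack them directly. Sketch only; assumes query/key/value and the
// weight, bias, and out tensors were already allocated with shapes the op
// expects:
#if 0
auto [attn, weights] = at::_ops::_native_multi_head_attention_out::call(
    query, key, value, embed_dim, num_head, qkv_weight, qkv_bias,
    proj_weight, proj_bias, /*mask=*/c10::nullopt, /*need_weights=*/true,
    /*average_attn_weights=*/true, /*mask_type=*/c10::nullopt, out0, out1);
#endif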

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_multi_head_attention_out, name, "aten::_triton_multi_head_attention")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_multi_head_attention_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_triton_multi_head_attention_out, schema_str, "_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)")

// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_triton_multi_head_attention_out::schema> create__triton_multi_head_attention_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_triton_multi_head_attention_out::name, _triton_multi_head_attention_out::overload_name)
      .typed<_triton_multi_head_attention_out::schema>();
}

// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _triton_multi_head_attention_out::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) {
    
    static auto op = create__triton_multi_head_attention_out_typed_handle();
    return op.call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
}

// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _triton_multi_head_attention_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) {
    
    static auto op = create__triton_multi_head_attention_out_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw_out, name, "aten::_fused_adamw")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw_out, overload_name, "out")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw_out, schema_str, "_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()")

// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw_out::schema> create__fused_adamw_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adamw_out::name, _fused_adamw_out::overload_name)
      .typed<_fused_adamw_out::schema>();
}

// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adamw_out::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
    
    static auto op = create__fused_adamw_out_typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adamw_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
    
    static auto op = create__fused_adamw_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw, name, "aten::_fused_adamw")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw, overload_name, "")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(_fused_adamw, schema_str, "_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)")

// aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw::schema> create__fused_adamw_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adamw::name, _fused_adamw::overload_name)
      .typed<_fused_adamw::schema>();
}

// aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_adamw_typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_adamw_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
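
// NOTE: [Illustrative sketch, not generated] Unlike _fused_adamw.out, the
// functional overload above mutates none of its inputs: it returns fresh
// tensor lists (self_out, grads_out, exp_avgs_out, exp_avg_sqs_out,
// max_exp_avg_sqs_out), the form functionalization passes consume. The
// hyperparameter values below are placeholders, not defaults:
#if 0
auto [self_out, grads_out, exp_avgs_out, exp_avg_sqs_out, max_exp_avg_sqs_out] =
    at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs,
                                 max_exp_avg_sqs, state_steps,
                                 /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999,
                                 /*weight_decay=*/1e-2, /*eps=*/1e-8,
                                 /*amsgrad=*/false, /*maximize=*/false,
                                 /*grad_scale=*/c10::nullopt,
                                 /*found_inf=*/c10::nullopt);
#endif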

}} // namespace at::_ops