// @generated by torchgen/gen.py from RegisterSchema.cpp
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <torch/library.h>

namespace at {
TORCH_LIBRARY(aten, m) {
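  // Each m.def() call below registers an operator schema with the dispatcher under the
  // "aten" namespace; kernels are registered separately (e.g. via TORCH_LIBRARY_IMPL).
  // In the schema strings, "Tensor(a!)" marks an argument that is mutated in place and
  // aliased with the output, "Tensor(a)" marks an aliasing (view) relationship, and "*"
  // separates positional from keyword-only arguments. The second argument is the
  // operator's tag set (e.g. at::Tag::pointwise, at::Tag::core, at::Tag::inplace_view).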
7 m.def("_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor", {});
8 m.def("_cast_Char(Tensor self, bool non_blocking=False) -> Tensor", {});
9 m.def("_cast_Double(Tensor self, bool non_blocking=False) -> Tensor", {});
10 m.def("_cast_Float(Tensor self, bool non_blocking=False) -> Tensor", {});
11 m.def("_cast_Int(Tensor self, bool non_blocking=False) -> Tensor", {});
12 m.def("_cast_Long(Tensor self, bool non_blocking=False) -> Tensor", {});
13 m.def("_cast_Short(Tensor self, bool non_blocking=False) -> Tensor", {});
14 m.def("_cast_Half(Tensor self, bool non_blocking=False) -> Tensor", {});
15 m.def("_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()", {});
16 m.def("set_data(Tensor(a!) self, Tensor new_data) -> ()", {});
17 m.def("data(Tensor self) -> Tensor", {});
18 m.def("is_leaf(Tensor self) -> bool", {});
19 m.def("output_nr(Tensor self) -> int", {});
20 m.def("_version(Tensor self) -> int", {});
21 m.def("requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)", {});
22 m.def("retain_grad(Tensor(a!) self) -> ()", {});
23 m.def("retains_grad(Tensor self) -> bool", {});
24 m.def("_fw_primal(Tensor(a) self, int level) -> Tensor(a)", {});
25 m.def("_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)", {});
26 m.def("_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)", {});
27 m.def("_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor", {});
28 m.def("_has_same_storage_numel(Tensor self, Tensor other) -> bool", {});
29 m.def("rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)", {at::Tag::inplace_view});
30 m.def("rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)", {});
31 m.def("align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)", {});
32 m.def("align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)", {});
33 m.def("align_as(Tensor self, Tensor other) -> Tensor", {});
34 m.def("align_tensors(Tensor[] tensors) -> Tensor[]", {});
35 m.def("_assert_async(Tensor self) -> ()", {});
36 m.def("_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()", {});
37 m.def("refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)", {});
38 m.def("_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool", {});
39 m.def("_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool", {});
40 m.def("_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)", {});
41 m.def("_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)", {});
42 m.def("_use_cudnn_rnn_flatten_weight() -> bool", {});
43 m.def("_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor", {});
44 m.def("_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", {});
45 m.def("_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])", {});
46 m.def("_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
47 m.def("_debug_has_internal_overlap(Tensor self) -> int", {});
48 m.def("_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)", {at::Tag::nondeterministic_seeded});
49 m.def("_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor", {});
50 m.def("native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)", {at::Tag::core, at::Tag::nondeterministic_seeded});
51 m.def("native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor", {at::Tag::pointwise});
52 m.def("_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)", {});
53 m.def("_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)", {});
54 m.def("_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)", {});
55 m.def("_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)", {});
56 m.def("_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor", {});
57 m.def("_shape_as_tensor(Tensor self) -> Tensor", {});
58 m.def("dropout(Tensor input, float p, bool train) -> Tensor", {at::Tag::nondeterministic_seeded});
59 m.def("dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
60 m.def("feature_dropout(Tensor input, float p, bool train) -> Tensor", {at::Tag::nondeterministic_seeded});
61 m.def("feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
62 m.def("alpha_dropout(Tensor input, float p, bool train) -> Tensor", {at::Tag::nondeterministic_seeded});
63 m.def("alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
64 m.def("feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor", {at::Tag::nondeterministic_seeded});
65 m.def("feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
66 m.def("abs(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
67 m.def("abs_(Tensor(a!) self) -> Tensor(a!)", {});
68 m.def("abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
69 m.def("absolute(Tensor self) -> Tensor", {});
70 m.def("absolute_(Tensor(a!) self) -> Tensor(a!)", {});
71 m.def("absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
72 m.def("angle(Tensor self) -> Tensor", {at::Tag::pointwise});
73 m.def("angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
74 m.def("view_as_real(Tensor(a) self) -> Tensor(a)", {});
75 m.def("view_as_complex(Tensor(a) self) -> Tensor(a)", {});
76 m.def("sgn(Tensor self) -> Tensor", {at::Tag::pointwise});
77 m.def("sgn_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
78 m.def("sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
79 m.def("chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor", {});
80 m.def("real(Tensor(a) self) -> Tensor(a)", {});
81 m.def("imag(Tensor(a) self) -> Tensor(a)", {});
82 m.def("_conj(Tensor(a) self) -> Tensor(a)", {});
83 m.def("conj(Tensor(a) self) -> Tensor(a)", {});
84 m.def("_conj_physical(Tensor self) -> Tensor", {});
85 m.def("conj_physical(Tensor self) -> Tensor", {at::Tag::pointwise});
86 m.def("conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
87 m.def("conj_physical_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
88 m.def("resolve_conj(Tensor(a) self) -> Tensor(a)", {});
89 m.def("resolve_neg(Tensor(a) self) -> Tensor(a)", {});
90 m.def("_neg_view(Tensor(a) self) -> Tensor(a)", {});
91 m.def("acos(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
92 m.def("acos_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
93 m.def("acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
94 m.def("arccos(Tensor self) -> Tensor", {});
95 m.def("arccos_(Tensor(a!) self) -> Tensor(a!)", {});
96 m.def("arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
97 m.def("avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor", {});
98 m.def("adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor", {});
99 m.def("adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)", {});
100 m.def("add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", {at::Tag::pointwise, at::Tag::core});
101 m.def("add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", {at::Tag::pointwise});
102 m.def("add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
103 m.def("_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", {});
104 m.def("_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", {});
105 m.def("_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
106 m.def("_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", {});
107 m.def("_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", {});
108 m.def("add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", {at::Tag::pointwise, at::Tag::core});
109 m.def("add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", {at::Tag::pointwise});
110 m.def("addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor", {});
111 m.def("addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", {});
112 m.def("addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
113 m.def("addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", {});
114 m.def("addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", {});
115 m.def("addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
116 m.def("affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor", {});
117 m.def("affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor", {});
118 m.def("_is_all_true(Tensor self) -> Tensor", {});
119 m.def("_is_any_true(Tensor self) -> Tensor", {});
120 m.def("_test_check_tensor(Tensor self) -> Tensor", {});
121 m.def("all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", {});
122 m.def("all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
123 m.def("all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", {});
124 m.def("all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
125 m.def("allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool", {at::Tag::data_dependent_output});
126 m.def("any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", {});
127 m.def("any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
128 m.def("any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", {});
129 m.def("any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
130 m.def("arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
131 m.def("arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
132 m.def("arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::core});
133 m.def("arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)", {});
134 m.def("arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)", {});
135 m.def("_dim_arange(Tensor like, int dim) -> Tensor", {});
136 m.def("argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", {at::Tag::core});
137 m.def("argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
138 m.def("argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", {at::Tag::core});
139 m.def("argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
140 m.def("acosh(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
141 m.def("acosh_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
142 m.def("acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
143 m.def("arccosh(Tensor self) -> Tensor", {});
144 m.def("arccosh_(Tensor(a!) self) -> Tensor(a!)", {});
145 m.def("arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
146 m.def("asinh(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
147 m.def("asinh_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
148 m.def("asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
149 m.def("arcsinh(Tensor self) -> Tensor", {});
150 m.def("arcsinh_(Tensor(a!) self) -> Tensor(a!)", {});
151 m.def("arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
152 m.def("atanh(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
153 m.def("atanh_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
154 m.def("atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
155 m.def("arctanh(Tensor self) -> Tensor", {});
156 m.def("arctanh_(Tensor(a!) self) -> Tensor(a!)", {});
157 m.def("arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
158 m.def("as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)", {at::Tag::core});
159 m.def("as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)", {at::Tag::inplace_view});
160 m.def("asin(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
161 m.def("asin_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
162 m.def("asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
163 m.def("arcsin(Tensor self) -> Tensor", {});
164 m.def("arcsin_(Tensor(a!) self) -> Tensor(a!)", {});
165 m.def("arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
166 m.def("atan(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
167 m.def("atan_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
168 m.def("atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
169 m.def("arctan(Tensor self) -> Tensor", {});
170 m.def("arctan_(Tensor(a!) self) -> Tensor(a!)", {});
171 m.def("arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
172 m.def("atleast_1d(Tensor self) -> Tensor", {});
173 m.def("atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]", {});
174 m.def("atleast_2d(Tensor self) -> Tensor", {});
175 m.def("atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]", {});
176 m.def("atleast_3d(Tensor self) -> Tensor", {});
177 m.def("atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]", {});
178 m.def("baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", {});
179 m.def("baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", {});
180 m.def("baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
181 m.def("bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
182 m.def("bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
183 m.def("batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor", {});
184 m.def("quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", {});
185 m.def("_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)", {});
186 m.def("_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)", {});
187 m.def("bernoulli(Tensor self, *, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
188 m.def("bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
189 m.def("bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
190 m.def("bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
191 m.def("bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
192 m.def("bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor", {});
193 m.def("binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", {});
194 m.def("binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", {});
195 m.def("binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", {});
196 m.def("binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
197 m.def("binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor", {});
198 m.def("bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor", {at::Tag::dynamic_output_shape});
199 m.def("bitwise_not(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
200 m.def("bitwise_not_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
201 m.def("bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
202 m.def("copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
203 m.def("copysign.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
204 m.def("copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
205 m.def("copysign.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
206 m.def("copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
207 m.def("copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
208 m.def("logical_not(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
209 m.def("logical_not_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
210 m.def("logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
211 m.def("logical_xor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
212 m.def("logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
213 m.def("logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
214 m.def("logical_and(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
215 m.def("logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
216 m.def("logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
217 m.def("logical_or(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
218 m.def("logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
219 m.def("logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
220 m.def("blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
221 m.def("blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
222 m.def("bmm(Tensor self, Tensor mat2) -> Tensor", {at::Tag::core});
223 m.def("bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", {});
224 m.def("broadcast_tensors(Tensor[] tensors) -> Tensor[]", {});
225 m.def("broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)", {});
226 m.def("_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)", {});
227 m.def("cat(Tensor[] tensors, int dim=0) -> Tensor", {at::Tag::core});
228 m.def("cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", {});
229 m.def("cat.names(Tensor[] tensors, Dimname dim) -> Tensor", {});
230 m.def("cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", {});
231 m.def("concat(Tensor[] tensors, int dim=0) -> Tensor", {});
232 m.def("concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", {});
233 m.def("concat.names(Tensor[] tensors, Dimname dim) -> Tensor", {});
234 m.def("concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", {});
235 m.def("concatenate(Tensor[] tensors, int dim=0) -> Tensor", {});
236 m.def("concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", {});
237 m.def("concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor", {});
238 m.def("concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", {});
239 m.def("block_diag(Tensor[] tensors) -> Tensor", {});
240 m.def("ceil(Tensor self) -> Tensor", {at::Tag::pointwise});
241 m.def("ceil_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
242 m.def("ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
243 m.def("chain_matmul(Tensor[] matrices) -> Tensor", {});
244 m.def("chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)", {});
245 m.def("unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]", {});
246 m.def("chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]", {});
247 m.def("tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]", {});
248 m.def("tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]", {});
249 m.def("tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]", {});
250 m.def("clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", {at::Tag::pointwise, at::Tag::core});
251 m.def("clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", {at::Tag::pointwise});
252 m.def("clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", {at::Tag::pointwise});
253 m.def("clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", {at::Tag::pointwise});
254 m.def("clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
255 m.def("clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
256 m.def("clamp_max(Tensor self, Scalar max) -> Tensor", {at::Tag::pointwise});
257 m.def("clamp_max.Tensor(Tensor self, Tensor max) -> Tensor", {at::Tag::pointwise});
258 m.def("clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)", {at::Tag::pointwise});
259 m.def("clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)", {at::Tag::pointwise});
260 m.def("clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
261 m.def("clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
262 m.def("clamp_min(Tensor self, Scalar min) -> Tensor", {at::Tag::pointwise});
263 m.def("clamp_min.Tensor(Tensor self, Tensor min) -> Tensor", {at::Tag::pointwise});
264 m.def("clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)", {at::Tag::pointwise});
265 m.def("clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)", {at::Tag::pointwise});
266 m.def("clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
267 m.def("clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
268 m.def("clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", {at::Tag::pointwise});
269 m.def("clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", {at::Tag::pointwise});
270 m.def("clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", {at::Tag::pointwise});
271 m.def("clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", {at::Tag::pointwise});
272 m.def("clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
273 m.def("clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)", {});
274 m.def("cudnn_is_acceptable(Tensor self) -> bool", {});
275 m.def("complex(Tensor real, Tensor imag) -> Tensor", {});
276 m.def("complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)", {});
277 m.def("polar(Tensor abs, Tensor angle) -> Tensor", {});
278 m.def("polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)", {});
279 m.def("constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor", {at::Tag::core});
280 m.def("contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)", {});
281 m.def("convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor", {at::Tag::core});
282 m.def("convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", {at::Tag::core});
283 m.def("convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor", {});
284 m.def("convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", {});
285 m.def("_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor", {});
286 m.def("_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor", {});
287 m.def("_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor", {});
288 m.def("_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", {});
289 m.def("conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor", {});
290 m.def("conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor", {});
291 m.def("conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor", {});
292 m.def("conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding=\"valid\", int[1] dilation=1, int groups=1) -> Tensor", {});
293 m.def("conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding=\"valid\", int[2] dilation=1, int groups=1) -> Tensor", {});
294 m.def("conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding=\"valid\", int[3] dilation=1, int groups=1) -> Tensor", {});
295 m.def("conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor", {});
296 m.def("conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)", {});
297 m.def("conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor", {});
298 m.def("conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor", {});
299 m.def("conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor", {});
300 m.def("copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor", {});
301 m.def("copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", {});
302 m.def("_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor", {});
303 m.def("_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor", {});
304 m.def("cos(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
305 m.def("cos_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
306 m.def("cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
307 m.def("cosh(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
308 m.def("cosh_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
309 m.def("cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
310 m.def("cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor", {});
311 m.def("count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor", {});
312 m.def("count_nonzero(Tensor self, int? dim=None) -> Tensor", {});
313 m.def("cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor", {});
314 m.def("corrcoef(Tensor self) -> Tensor", {});
315 m.def("cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid", {});
316 m.def("cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta", {});
317 m.def("cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)", {});
318 m.def("cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)", {});
319 m.def("cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor", {});
320 m.def("cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor", {});
321 m.def("_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor", {});
322 m.def("mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)", {});
323 m.def("cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor", {});
324 m.def("cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor", {});
325 m.def("cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output", {});
326 m.def("cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)", {});
327 m.def("cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)", {});
328 m.def("cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
329 m.def("cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", {});
330 m.def("cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
331 m.def("_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()", {});
332 m.def("cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)", {});
333 m.def("cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
334 m.def("cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", {});
335 m.def("cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
336 m.def("_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()", {});
337 m.def("cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor", {});
338 m.def("cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", {});
339 m.def("cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", {});
340 m.def("cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
341 m.def("cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", {});
342 m.def("cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", {});
343 m.def("cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
344 m.def("cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor", {});
345 m.def("cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", {});
346 m.def("cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", {});
347 m.def("cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
348 m.def("cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", {});
349 m.def("cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", {});
350 m.def("cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
351 m.def("cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", {});
352 m.def("cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor", {});
353 m.def("ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", {});
354 m.def("ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", {});
355 m.def("_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)", {at::Tag::dynamic_output_shape});
356 m.def("_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)", {at::Tag::dynamic_output_shape});
357 m.def("_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor", {});
358 m.def("_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor", {});
359 m.def("diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor", {});
360 m.def("diagflat(Tensor self, int offset=0) -> Tensor", {});
361 m.def("diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)", {});
362 m.def("linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)", {});
363 m.def("diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)", {});
364 m.def("diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor", {});
365 m.def("fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)", {});
366 m.def("diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor", {});
367 m.def("diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)", {});
368 m.def("gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]", {});
369 m.def("gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]", {});
370 m.def("gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]", {});
371 m.def("gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]", {});
372 m.def("gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]", {});
373 m.def("gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]", {});
374 m.def("gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]", {});
375 m.def("div.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
376 m.def("div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
377 m.def("div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
378 m.def("div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", {at::Tag::pointwise});
379 m.def("div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", {at::Tag::pointwise});
380 m.def("div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
381 m.def("div.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
382 m.def("div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
383 m.def("div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor", {at::Tag::pointwise});
384 m.def("div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", {at::Tag::pointwise});
385 m.def("divide.Tensor(Tensor self, Tensor other) -> Tensor", {});
386 m.def("divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
387 m.def("divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
388 m.def("divide.Scalar(Tensor self, Scalar other) -> Tensor", {});
389 m.def("divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
390 m.def("divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", {});
391 m.def("divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", {});
392 m.def("divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", {});
393 m.def("divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor", {});
394 m.def("divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", {});
395 m.def("true_divide.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
396 m.def("true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
397 m.def("true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
398 m.def("true_divide.Scalar(Tensor self, Scalar other) -> Tensor", {});
399 m.def("true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
400 m.def("dot(Tensor self, Tensor tensor) -> Tensor", {});
401 m.def("dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)", {});
402 m.def("vdot(Tensor self, Tensor other) -> Tensor", {});
403 m.def("vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
404 m.def("einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor", {});
405 m.def("embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor", {});
406 m.def("embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor", {});
407 m.def("embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor", {at::Tag::core});
408 m.def("embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)", {});
409 m.def("embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor", {});
410 m.def("_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)", {});
411 m.def("_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)", {});
412 m.def("row_stack(Tensor[] tensors) -> Tensor", {});
413 m.def("row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", {});
414 m.def("embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)", {});
415 m.def("embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)", {});
416 m.def("_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)", {});
417 m.def("_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", {});
418 m.def("_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", {});
419 m.def("_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", {});
420 m.def("_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor", {});
421 m.def("empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {});
422 m.def("empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {});
423 m.def("new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
424 m.def("new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
425 m.def("new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
426 m.def("new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
427 m.def("new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
428 m.def("_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor", {});
429 m.def("_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor", {});
430 m.def("resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)", {at::Tag::inplace_view});
431 m.def("_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)", {});
432 m.def("empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {});
433 m.def("empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {});
434 m.def("empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {});
435 m.def("empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::core});
436 m.def("erf(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
437 m.def("erf_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
438 m.def("erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
439 m.def("erfc(Tensor self) -> Tensor", {at::Tag::pointwise});
440 m.def("erfc_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
441 m.def("erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
442 m.def("exp(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
443 m.def("exp_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
444 m.def("exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
445 m.def("exp2(Tensor self) -> Tensor", {at::Tag::pointwise});
446 m.def("exp2_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
447 m.def("exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
448 m.def("expm1(Tensor self) -> Tensor", {at::Tag::pointwise});
449 m.def("expm1_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
450 m.def("expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
451 m.def("expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)", {at::Tag::core});
452 m.def("expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", {});
453 m.def("eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
454 m.def("eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
455 m.def("eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)", {});
456 m.def("eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)", {});
457 m.def("flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", {});
458 m.def("flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)", {});
459 m.def("flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)", {});
460 m.def("flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)", {});
461 m.def("unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)", {});
462 m.def("unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)", {});
463 m.def("fill.Scalar(Tensor self, Scalar value) -> Tensor", {at::Tag::core});
464 m.def("fill.Tensor(Tensor self, Tensor value) -> Tensor", {});
465 m.def("fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)", {});
466 m.def("fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)", {});
467 m.def("floor(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
468 m.def("floor_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
469 m.def("floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
470 m.def("floor_divide(Tensor self, Tensor other) -> Tensor", {});
471 m.def("floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
472 m.def("floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
473 m.def("floor_divide.Scalar(Tensor self, Scalar other) -> Tensor", {});
474 m.def("floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
475 m.def("frac(Tensor self) -> Tensor", {at::Tag::pointwise});
476 m.def("frac_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
477 m.def("frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
478 m.def("full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
479 m.def("full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::core});
480 m.def("full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", {});
481 m.def("full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {});
482 m.def("from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
483 m.def("gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
484 m.def("gcd(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
485 m.def("gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
486 m.def("lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
487 m.def("lcm(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
488 m.def("lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
489 m.def("grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", {});
490 m.def("grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", {at::Tag::core});
491 m.def("grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)", {});
492 m.def("_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", {});
493 m.def("_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)", {});
494 m.def("grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", {});
495 m.def("grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)", {});
496 m.def("hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
497 m.def("hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
498 m.def("hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
499 m.def("hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
500 m.def("hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
501 m.def("hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
502 m.def("kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
503 m.def("kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
504 m.def("kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
505 m.def("hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor", {});
506 m.def("group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor", {});
507 m.def("native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)", {at::Tag::core});
508 m.def("native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", {at::Tag::core});
509 m.def("_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor", {});
510 m.def("_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)", {});
511 m.def("_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor", {});
512 m.def("_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)", {});
513 m.def("_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor", {});
514 m.def("_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)", {});
515 m.def("_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()", {});
516 m.def("_cufft_get_plan_cache_size(int device_index) -> int", {});
517 m.def("_cufft_get_plan_cache_max_size(int device_index) -> int", {});
518 m.def("_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()", {});
519 m.def("_cufft_clear_plan_cache(int device_index) -> ()", {});
520 m.def("index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", {at::Tag::dynamic_output_shape});
521 m.def("index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)", {});
522 m.def("index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", {});
523 m.def("index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)", {});
524 m.def("index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor", {});
525 m.def("index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)", {});
526 m.def("index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor", {});
527 m.def("index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)", {});
528 m.def("index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor", {});
529 m.def("_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)", {});
530 m.def("instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor", {});
531 m.def("isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor", {});
532 m.def("isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", {});
533 m.def("isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor", {});
534 m.def("isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", {});
535 m.def("isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor", {});
536 m.def("isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", {});
537 m.def("isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor", {});
538 m.def("isnan(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
539 m.def("is_distributed(Tensor self) -> bool", {});
540 m.def("is_floating_point(Tensor self) -> bool", {});
541 m.def("is_complex(Tensor self) -> bool", {});
542 m.def("is_conj(Tensor self) -> bool", {});
543 m.def("_is_zerotensor(Tensor self) -> bool", {});
544 m.def("is_neg(Tensor self) -> bool", {});
545 m.def("isreal(Tensor self) -> Tensor", {});
546 m.def("is_nonzero(Tensor self) -> bool", {});
547 m.def("is_same_size(Tensor self, Tensor other) -> bool", {});
548 m.def("is_signed(Tensor self) -> bool", {});
549 m.def("is_inference(Tensor self) -> bool", {});
550 m.def("kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor", {});
551 m.def("kron(Tensor self, Tensor other) -> Tensor", {});
552 m.def("kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
553 m.def("kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
554 m.def("kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
555 m.def("kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
556 m.def("kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
557 m.def("layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor", {});
558 m.def("native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)", {at::Tag::core});
559 m.def("native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", {at::Tag::core});
560 m.def("nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor", {at::Tag::pointwise});
561 m.def("nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)", {at::Tag::pointwise});
562 m.def("nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
563 m.def("linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", {});
564 m.def("linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", {});
565 m.def("linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)", {});
566 m.def("mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor", {});
567 m.def("mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor", {});
568 m.def("mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)", {});
569 m.def("mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", {});
570 m.def("fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor", {});
571 m.def("fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor", {});
572 m.def("fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)", {});
573 m.def("fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor", {});
574 m.def("fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor", {});
575 m.def("fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor", {});
576 m.def("fbgemm_pack_quantized_matrix(Tensor input) -> Tensor", {});
577 m.def("fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor", {});
578 m.def("ldexp.Tensor(Tensor self, Tensor other) -> Tensor", {});
579 m.def("ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
580 m.def("ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
581 m.def("linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
582 m.def("linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)", {});
583 m.def("log(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
584 m.def("log_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
585 m.def("log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
586 m.def("log10(Tensor self) -> Tensor", {at::Tag::pointwise});
587 m.def("log10_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
588 m.def("log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
589 m.def("log1p(Tensor self) -> Tensor", {at::Tag::pointwise});
590 m.def("log1p_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
591 m.def("log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
592 m.def("log2(Tensor self) -> Tensor", {at::Tag::pointwise});
593 m.def("log2_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
594 m.def("log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
595 m.def("logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
596 m.def("logaddexp(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
597 m.def("logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
598 m.def("logaddexp2(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
599 m.def("xlogy.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
600 m.def("xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
601 m.def("xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
602 m.def("xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
603 m.def("xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
604 m.def("xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
605 m.def("xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
606 m.def("xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
607 m.def("logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
608 m.def("logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", {});
609 m.def("log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", {});
610 m.def("log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", {});
611 m.def("log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", {});
612 m.def("_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", {at::Tag::core});
613 m.def("_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", {});
614 m.def("_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor", {});
615 m.def("_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)", {});
616 m.def("_logcumsumexp(Tensor self, int dim) -> Tensor", {});
617 m.def("_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", {});
618 m.def("logcumsumexp(Tensor self, int dim) -> Tensor", {});
619 m.def("logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", {});
620 m.def("logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor", {});
621 m.def("logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", {});
622 m.def("logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", {});
623 m.def("logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
624 m.def("logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor", {});
625 m.def("logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
626 m.def("margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor", {});
627 m.def("matmul(Tensor self, Tensor other) -> Tensor", {});
628 m.def("matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)", {});
629 m.def("matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
630 m.def("matrix_power(Tensor self, int n) -> Tensor", {});
631 m.def("matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)", {});
632 m.def("matrix_exp(Tensor self) -> Tensor", {});
633 m.def("matrix_exp_backward(Tensor self, Tensor grad) -> Tensor", {});
634 m.def("_aminmax(Tensor self) -> (Tensor, Tensor)", {});
635 m.def("_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)", {});
636 m.def("aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)", {});
637 m.def("aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)", {});
638 m.def("_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor", {});
639 m.def("_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)", {});
640 m.def("max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {at::Tag::core});
641 m.def("max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)", {});
642 m.def("max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
643 m.def("max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)", {});
644 m.def("value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor", {});
645 m.def("amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", {at::Tag::core});
646 m.def("amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
647 m.def("max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", {});
648 m.def("max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", {});
649 m.def("max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", {});
650 m.def("_mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", {});
651 m.def("mps_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", {});
652 m.def("mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", {});
653 m.def("mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", {});
654 m.def("mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", {});
655 m.def("mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", {});
656 m.def("quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", {});
657 m.def("quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", {});
658 m.def("max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", {});
659 m.def("mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", {});
660 m.def("mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {at::Tag::core});
661 m.def("mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
662 m.def("mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
663 m.def("mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
664 m.def("nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
665 m.def("nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
666 m.def("median(Tensor self) -> Tensor", {});
667 m.def("median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
668 m.def("median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
669 m.def("median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
670 m.def("median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
671 m.def("nanmedian(Tensor self) -> Tensor", {});
672 m.def("nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
673 m.def("nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
674 m.def("nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
675 m.def("nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
676 m.def("min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {at::Tag::core});
677 m.def("min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
678 m.def("min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
679 m.def("min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
680 m.def("amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", {at::Tag::core});
681 m.def("amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
682 m.def("_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor", {});
683 m.def("mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", {});
684 m.def("mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor", {});
685 m.def("mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)", {});
686 m.def("mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)", {});
687 m.def("miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)", {});
688 m.def("miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)", {});
689 m.def("miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor", {});
690 m.def("miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor", {});
691 m.def("miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor", {});
692 m.def("miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor", {});
693 m.def("miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor", {});
694 m.def("miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", {});
695 m.def("miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])", {});
696 m.def("mm(Tensor self, Tensor mat2) -> Tensor", {at::Tag::core});
697 m.def("mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", {});
698 m.def("_sparse_mm(Tensor sparse, Tensor dense) -> Tensor", {});
699 m.def("_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor", {});
700 m.def("_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor", {});
701 m.def("mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
702 m.def("mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
703 m.def("mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", {});
704 m.def("mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
705 m.def("mul.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
706 m.def("mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
707 m.def("mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
708 m.def("mul.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
709 m.def("mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
710 m.def("multiply.Tensor(Tensor self, Tensor other) -> Tensor", {});
711 m.def("multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
712 m.def("multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
713 m.def("multiply.Scalar(Tensor self, Scalar other) -> Tensor", {});
714 m.def("multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
715 m.def("mv(Tensor self, Tensor vec) -> Tensor", {});
716 m.def("mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)", {});
717 m.def("mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
718 m.def("mvlgamma(Tensor self, int p) -> Tensor", {at::Tag::pointwise});
719 m.def("mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)", {at::Tag::pointwise});
720 m.def("narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor", {at::Tag::view_copy});
721 m.def("narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)", {});
722 m.def("narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)", {});
723 m.def("narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)", {});
724 m.def("native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", {at::Tag::core});
725 m.def("native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {});
726 m.def("_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", {});
727 m.def("_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))", {});
728 m.def("_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", {at::Tag::core});
729 m.def("_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {});
730 m.def("batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)", {});
731 m.def("batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor", {});
732 m.def("batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)", {});
733 m.def("batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)", {});
734 m.def("batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)", {});
735 m.def("native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", {});
736 m.def("batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)", {});
737 m.def("batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor", {});
738 m.def("batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)", {});
739 m.def("is_vulkan_available() -> bool", {});
740 m.def("_nnpack_available() -> bool", {});
741 m.def("_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor", {});
742 m.def("ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
743 m.def("ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
744 m.def("ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {});
745 m.def("ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {});
746 m.def("pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor", {});
747 m.def("cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor", {});
748 m.def("_euclidean_dist(Tensor x1, Tensor x2) -> Tensor", {});
749 m.def("_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor", {});
750 m.def("_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor", {});
751 m.def("pdist(Tensor self, float p=2) -> Tensor", {});
752 m.def("_pdist_forward(Tensor self, float p=2) -> Tensor", {});
753 m.def("_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor", {});
754 m.def("cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor", {});
755 m.def("permute(Tensor(a) self, int[] dims) -> Tensor(a)", {at::Tag::core});
756 m.def("movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", {});
757 m.def("movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)", {});
758 m.def("moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", {});
759 m.def("moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)", {});
760 m.def("numpy_T(Tensor(a) self) -> Tensor(a)", {});
761 m.def("matrix_H(Tensor(a) self) -> Tensor(a)", {});
762 m.def("mT(Tensor(a) self) -> Tensor(a)", {});
763 m.def("mH(Tensor(a) self) -> Tensor(a)", {});
764 m.def("adjoint(Tensor(a) self) -> Tensor(a)", {});
765 m.def("pixel_shuffle(Tensor self, int upscale_factor) -> Tensor", {});
766 m.def("pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor", {});
767 m.def("channel_shuffle(Tensor self, int groups) -> Tensor", {});
768 m.def("native_channel_shuffle(Tensor self, int groups) -> Tensor", {});
769 m.def("is_pinned(Tensor self, Device? device=None) -> bool", {});
770 m.def("pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)", {});
771 m.def("_pin_memory(Tensor self, Device? device=None) -> Tensor", {});
772 m.def("pinverse(Tensor self, float rcond=1e-15) -> Tensor", {});
773 m.def("poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor", {});
774 m.def("rad2deg(Tensor self) -> Tensor", {});
775 m.def("rad2deg_(Tensor(a!) self) -> Tensor(a!)", {});
776 m.def("rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
777 m.def("deg2rad(Tensor self) -> Tensor", {at::Tag::pointwise});
778 m.def("deg2rad_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
779 m.def("deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
780 m.def("scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::core});
781 m.def("rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
782 m.def("rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
783 m.def("rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
784 m.def("rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
785 m.def("rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
786 m.def("rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
787 m.def("rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {at::Tag::nondeterministic_seeded});
788 m.def("randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
789 m.def("randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
790 m.def("randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
791 m.def("randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
792 m.def("randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
793 m.def("randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
794 m.def("randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
795 m.def("randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
796 m.def("randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {at::Tag::nondeterministic_seeded});
797 m.def("randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {at::Tag::nondeterministic_seeded});
798 m.def("randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
799 m.def("randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
800 m.def("randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
801 m.def("randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
802 m.def("randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
803 m.def("randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
804 m.def("randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {at::Tag::nondeterministic_seeded});
805 m.def("randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
806 m.def("randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
807 m.def("randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
808 m.def("randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
809 m.def("range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
810 m.def("range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
811 m.def("range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)", {});
812 m.def("range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)", {});
813 m.def("ravel(Tensor(a) self) -> Tensor(a)", {});
814 m.def("reciprocal(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
815 m.def("reciprocal_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
816 m.def("reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
817 m.def("neg(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
818 m.def("neg_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
819 m.def("neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
820 m.def("negative(Tensor self) -> Tensor", {});
821 m.def("negative_(Tensor(a!) self) -> Tensor(a!)", {});
822 m.def("negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
823 m.def("repeat(Tensor self, SymInt[] repeats) -> Tensor", {at::Tag::core});
824 m.def("repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor", {at::Tag::dynamic_output_shape});
825 m.def("repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor", {});
826 m.def("repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor", {});
827 m.def("reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)", {});
828 m.def("_reshape_copy(Tensor self, SymInt[] size) -> Tensor", {});
829 m.def("_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)", {});
830 m.def("_mkldnn_reshape(Tensor self, int[] shape) -> Tensor", {});
831 m.def("reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)", {});
832 m.def("round(Tensor self) -> Tensor", {at::Tag::pointwise});
833 m.def("round_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
834 m.def("round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
835 m.def("round.decimals(Tensor self, *, int decimals) -> Tensor", {at::Tag::pointwise});
836 m.def("round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)", {at::Tag::pointwise});
837 m.def("round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
838 m.def("rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
839 m.def("rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
840 m.def("relu(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
841 m.def("relu_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
842 m.def("relu6(Tensor self) -> Tensor", {});
843 m.def("relu6_(Tensor(a!) self) -> Tensor(a!)", {});
844 m.def("prelu(Tensor self, Tensor weight) -> Tensor", {});
845 m.def("_prelu_kernel(Tensor self, Tensor weight) -> Tensor", {});
846 m.def("_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)", {});
847 m.def("gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)", {});
848 m.def("gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)", {});
849 m.def("gelu(Tensor self, *, str approximate='none') -> Tensor", {at::Tag::pointwise, at::Tag::core});
850 m.def("gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)", {});
851 m.def("gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor", {at::Tag::pointwise});
852 m.def("infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor", {});
853 m.def("hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)", {});
854 m.def("hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor", {});
855 m.def("hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
856 m.def("hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor", {});
857 m.def("rsqrt(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
858 m.def("rsqrt_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
859 m.def("rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
860 m.def("select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)", {});
861 m.def("select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)", {at::Tag::core});
862 m.def("select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor", {});
863 m.def("_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor", {});
864 m.def("selu(Tensor self) -> Tensor", {});
865 m.def("selu_(Tensor(a!) self) -> Tensor(a!)", {});
866 m.def("celu(Tensor self, Scalar alpha=1.0) -> Tensor", {});
867 m.def("celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)", {});
868 m.def("silu(Tensor self) -> Tensor", {});
869 m.def("silu_(Tensor(a!) self) -> Tensor(a!)", {});
870 m.def("silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
871 m.def("silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
872 m.def("silu_backward(Tensor grad_output, Tensor self) -> Tensor", {});
873 m.def("mish(Tensor self) -> Tensor", {});
874 m.def("mish_(Tensor(a!) self) -> Tensor(a!)", {});
875 m.def("mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
876 m.def("mish_backward(Tensor grad_output, Tensor self) -> Tensor", {});
877 m.def("sigmoid(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
878 m.def("sigmoid_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
879 m.def("sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
880 m.def("logit(Tensor self, float? eps=None) -> Tensor", {at::Tag::pointwise});
881 m.def("logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)", {at::Tag::pointwise});
882 m.def("logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
883 m.def("sin(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
884 m.def("sin_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
885 m.def("sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
886 m.def("sinc(Tensor self) -> Tensor", {at::Tag::pointwise});
887 m.def("sinc_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
888 m.def("sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
889 m.def("sinh(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
890 m.def("sinh_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
891 m.def("sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
892 m.def("detach(Tensor(a) self) -> Tensor(a)", {});
893 m.def("detach_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::inplace_view});
894 m.def("size.int(Tensor self, int dim) -> int", {});
895 m.def("size.Dimname(Tensor self, Dimname dim) -> int", {});
896 m.def("slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)", {at::Tag::core});
897 m.def("slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor", {});
898 m.def("slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", {at::Tag::core});
899 m.def("select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor", {});
900 m.def("diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor", {});
901 m.def("as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", {});
902 m.def("smm(Tensor self, Tensor mat2) -> Tensor", {});
903 m.def("softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", {});
904 m.def("softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", {});
905 m.def("softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", {});
906 m.def("_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", {at::Tag::core});
907 m.def("_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", {});
908 m.def("_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor", {});
909 m.def("_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
910 m.def("unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]", {});
911 m.def("split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]", {});
912 m.def("split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]", {});
913 m.def("unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]", {});
914 m.def("split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]", {});
915 m.def("hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", {});
916 m.def("hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", {});
917 m.def("vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", {});
918 m.def("vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", {});
919 m.def("dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", {});
920 m.def("dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", {});
921 m.def("squeeze(Tensor(a) self) -> Tensor(a)", {});
922 m.def("squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", {at::Tag::core});
923 m.def("squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)", {});
924 m.def("squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)", {at::Tag::core});
925 m.def("squeeze_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::inplace_view});
926 m.def("squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)", {at::Tag::inplace_view});
927 m.def("squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)", {at::Tag::inplace_view});
928 m.def("squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)", {at::Tag::inplace_view});
929 m.def("sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", {});
930 m.def("sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
931 m.def("stack(Tensor[] tensors, int dim=0) -> Tensor", {});
932 m.def("stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", {});
933 m.def("_stack(Tensor[] tensors, int dim=0) -> Tensor", {});
934 m.def("_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", {});
935 m.def("hstack(Tensor[] tensors) -> Tensor", {});
936 m.def("hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", {});
937 m.def("vstack(Tensor[] tensors) -> Tensor", {});
938 m.def("vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", {});
939 m.def("dstack(Tensor[] tensors) -> Tensor", {});
940 m.def("dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", {});
941 m.def("stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor", {});
942 m.def("stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor", {});
943 m.def("istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor", {});
944 m.def("stride.int(Tensor self, int dim) -> int", {});
945 m.def("stride.Dimname(Tensor self, Dimname dim) -> int", {});
946 m.def("sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", {});
947 m.def("sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {at::Tag::core});
948 m.def("sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
949 m.def("sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
950 m.def("sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
951 m.def("_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor", {});
952 m.def("nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
953 m.def("nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
954 m.def("sum_to_size(Tensor self, int[] size) -> Tensor", {});
955 m.def("sqrt(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
956 m.def("sqrt_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
957 m.def("sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
958 m.def("square(Tensor self) -> Tensor", {at::Tag::pointwise});
959 m.def("square_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
960 m.def("square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
961 m.def("std(Tensor self, bool unbiased=True) -> Tensor", {});
962 m.def("std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", {});
963 m.def("std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor", {});
964 m.def("std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", {});
965 m.def("std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", {});
966 m.def("std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)", {});
967 m.def("std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", {});
968 m.def("std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)", {});
969 m.def("std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
970 m.def("std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", {});
971 m.def("std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", {});
972 m.def("std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
973 m.def("std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor", {});
974 m.def("std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", {});
975 m.def("prod(Tensor self, *, ScalarType? dtype=None) -> Tensor", {});
976 m.def("prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
977 m.def("prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
978 m.def("prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
979 m.def("prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
980 m.def("t(Tensor(a) self) -> Tensor(a)", {});
981 m.def("t_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::inplace_view});
982 m.def("tan(Tensor self) -> Tensor", {at::Tag::pointwise});
983 m.def("tan_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
984 m.def("tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
985 m.def("tanh(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
986 m.def("tanh_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
987 m.def("tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
988 m.def("tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor", {});
989 m.def("tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)", {});
990 m.def("threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor", {});
991 m.def("threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)", {});
992 m.def("threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", {});
993 m.def("threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
994 m.def("threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor", {at::Tag::pointwise});
995 m.def("tile(Tensor self, int[] dims) -> Tensor", {});
996 m.def("transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", {});
997 m.def("transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)", {});
998 m.def("_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor", {});
999 m.def("transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", {at::Tag::inplace_view});
1000 m.def("_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", {});
1001 m.def("one_hot(Tensor self, int num_classes=-1) -> Tensor", {at::Tag::dynamic_output_shape});
1002 m.def("flip(Tensor self, int[] dims) -> Tensor", {at::Tag::core});
1003 m.def("fliplr(Tensor self) -> Tensor", {});
1004 m.def("flipud(Tensor self) -> Tensor", {});
1005 m.def("roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor", {});
1006 m.def("rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor", {});
1007 m.def("trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", {});
1008 m.def("trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor", {});
1009 m.def("trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", {});
1010 m.def("trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor", {});
1011 m.def("_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)", {});
1012 m.def("_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor", {});
1013 m.def("_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool", {});
1014 m.def("_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor", {});
1015 m.def("_nested_tensor_size(Tensor self) -> Tensor", {});
1016 m.def("_nested_tensor_strides(Tensor self) -> Tensor", {});
1017 m.def("_nested_tensor_offsets(Tensor self) -> int[]", {});
1018 m.def("_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor", {});
1019 m.def("_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)", {});
1020 m.def("_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor", {at::Tag::view_copy});
1021 m.def("_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor", {});
1022 m.def("triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor", {});
1023 m.def("trunc(Tensor self) -> Tensor", {at::Tag::pointwise});
1024 m.def("trunc_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
1025 m.def("trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1026 m.def("fix(Tensor self) -> Tensor", {});
1027 m.def("fix_(Tensor(a!) self) -> Tensor(a!)", {});
1028 m.def("fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
1029 m.def("type_as(Tensor self, Tensor other) -> Tensor", {});
1030 m.def("_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool", {});
1031 m.def("_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)", {});
1032 m.def("unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", {at::Tag::dynamic_output_shape});
1033 m.def("unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)", {at::Tag::dynamic_output_shape});
1034 m.def("unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", {at::Tag::dynamic_output_shape});
1035 m.def("_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", {at::Tag::dynamic_output_shape});
1036 m.def("_unsafe_view(Tensor self, SymInt[] size) -> Tensor", {});
1037 m.def("unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", {at::Tag::core});
1038 m.def("unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)", {at::Tag::inplace_view});
1039 m.def("vander(Tensor x, int? N=None, bool increasing=False) -> Tensor", {});
1040 m.def("var(Tensor self, bool unbiased=True) -> Tensor", {});
1041 m.def("var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", {at::Tag::core});
1042 m.def("var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor", {});
1043 m.def("var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1044 m.def("var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", {});
1045 m.def("var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", {});
1046 m.def("var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1047 m.def("var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor", {});
1048 m.def("var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", {});
1049 m.def("var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", {});
1050 m.def("var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", {});
1051 m.def("var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)", {});
1052 m.def("var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", {});
1053 m.def("var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)", {});
1054 m.def("view_as(Tensor(a) self, Tensor other) -> Tensor(a)", {});
1055 m.def("where.self(Tensor condition, Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1056 m.def("where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
1057 m.def("where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor", {});
1058 m.def("where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor", {});
1059 m.def("where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor", {});
1060 m.def("where(Tensor condition) -> Tensor[]", {});
1061 m.def("norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor", {});
1062 m.def("_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor", {});
1063 m.def("_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)", {});
1064 m.def("_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", {});
1065 m.def("_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", {});
1066 m.def("zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1067 m.def("_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1068 m.def("zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1069 m.def("zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {});
1070 m.def("zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", {});
1071 m.def("_standard_gamma_grad(Tensor self, Tensor output) -> Tensor", {});
1072 m.def("_standard_gamma(Tensor self, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1073 m.def("_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor", {});
1074 m.def("_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1075 m.def("poisson(Tensor self, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1076 m.def("binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1077 m.def("native_norm(Tensor self, Scalar p=2) -> Tensor", {});
1078 m.def("native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor", {});
1079 m.def("_sparse_sum(Tensor self) -> Tensor", {});
1080 m.def("_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor", {});
1081 m.def("_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor", {});
1082 m.def("_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor", {});
1083 m.def("_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor", {});
1084 m.def("_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
1085 m.def("_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
1086 m.def("_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", {});
1087 m.def("_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", {});
1088 m.def("_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", {});
1089 m.def("_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor", {});
1090 m.def("_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", {});
1091 m.def("_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", {});
1092 m.def("_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", {});
1093 m.def("_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor", {});
1094 m.def("_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor", {});
1095 m.def("norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor", {});
1096 m.def("norm.Scalar(Tensor self, Scalar p=2) -> Tensor", {});
1097 m.def("norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", {});
1098 m.def("norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor", {});
1099 m.def("norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", {});
1100 m.def("norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1101 m.def("norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", {});
1102 m.def("norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor", {});
1103 m.def("norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", {});
1104 m.def("norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1105 m.def("frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)", {at::Tag::pointwise});
1106 m.def("frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)", {at::Tag::pointwise});
1107 m.def("frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", {});
1108 m.def("frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1109 m.def("nuclear_norm(Tensor self, bool keepdim=False) -> Tensor", {});
1110 m.def("nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1111 m.def("nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor", {});
1112 m.def("nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1113 m.def("clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor", {at::Tag::core});
1114 m.def("positive(Tensor(a) self) -> Tensor(a)", {at::Tag::pointwise});
1115 m.def("resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)", {});
1116 m.def("resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)", {});
1117 m.def("zero_(Tensor(a!) self) -> Tensor(a!)", {});
1118 m.def("sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1119 m.def("sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1120 m.def("sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", {at::Tag::pointwise});
1121 m.def("sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1122 m.def("sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", {at::Tag::pointwise});
1123 m.def("subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
1124 m.def("subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", {});
1125 m.def("subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", {});
1126 m.def("subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", {});
1127 m.def("subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", {});
1128 m.def("rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", {});
1129 m.def("heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1130 m.def("heaviside(Tensor self, Tensor values) -> Tensor", {at::Tag::pointwise});
1131 m.def("heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)", {});
1132 m.def("rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", {at::Tag::pointwise});
1133 m.def("_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", {});
1134 m.def("sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
1135 m.def("sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", {});
1136 m.def("_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)", {});
1137 m.def("_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)", {});
1138 m.def("addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
1139 m.def("addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", {at::Tag::core});
1140 m.def("addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", {});
1141 m.def("_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)", {});
1142 m.def("_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor", {});
1143 m.def("sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1144 m.def("sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1145 m.def("sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1146 m.def("sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1147 m.def("sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1148 m.def("sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1149 m.def("sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1150 m.def("sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1151 m.def("sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1152 m.def("sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1153 m.def("_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1154 m.def("_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1155 m.def("_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1156 m.def("_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1157 m.def("_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1158 m.def("sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1159 m.def("sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1160 m.def("sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1161 m.def("_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1162 m.def("_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> ()", {});
1163 m.def("_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()", {});
1164 m.def("_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()", {});
1165 m.def("_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()", {});
1166 m.def("_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()", {});
1167 m.def("_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()", {});
1168 m.def("_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1169 m.def("_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", {});
1170 m.def("sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)", {});
1171 m.def("sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)", {});
1172 m.def("sparse_mask(Tensor self, Tensor mask) -> Tensor", {});
1173 m.def("_to_cpu(Tensor[] tensors) -> Tensor[]", {});
1174 m.def("to_dense(Tensor self, ScalarType? dtype=None) -> Tensor", {});
1175 m.def("_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor", {});
1176 m.def("to_dense_backward(Tensor grad, Tensor input) -> Tensor", {});
1177 m.def("sparse_dim(Tensor self) -> int", {});
1178 m.def("_dimI(Tensor self) -> int", {});
1179 m.def("dense_dim(Tensor self) -> int", {});
1180 m.def("_dimV(Tensor self) -> int", {});
1181 m.def("_nnz(Tensor self) -> int", {});
1182 m.def("coalesce(Tensor(a) self) -> Tensor(a)", {});
1183 m.def("_coalesce(Tensor self) -> Tensor", {});
1184 m.def("is_coalesced(Tensor self) -> bool", {});
1185 m.def("_indices(Tensor(a) self) -> Tensor(a)", {});
1186 m.def("_values(Tensor(a) self) -> Tensor(a)", {});
1187 m.def("_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)", {});
1188 m.def("indices(Tensor(a) self) -> Tensor(a)", {});
1189 m.def("values(Tensor(a) self) -> Tensor(a)", {});
1190 m.def("crow_indices(Tensor(a) self) -> Tensor(a)", {});
1191 m.def("col_indices(Tensor(a) self) -> Tensor(a)", {});
1192 m.def("ccol_indices(Tensor(a) self) -> Tensor(a)", {});
1193 m.def("row_indices(Tensor(a) self) -> Tensor(a)", {});
1194 m.def("hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", {});
1195 m.def("hspmm(Tensor mat1, Tensor mat2) -> Tensor", {});
1196 m.def("copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", {});
1197 m.def("unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]", {});
1198 m.def("unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]", {});
1199 m.def("to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor", {});
1200 m.def("to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor", {});
1201 m.def("to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor", {});
1202 m.def("to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor", {});
1203 m.def("to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", {});
1204 m.def("to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", {});
1205 m.def("to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor", {});
1206 m.def("mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor", {});
1207 m.def("mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor", {});
1208 m.def("to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor", {});
1209 m.def("quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor", {});
1210 m.def("quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor", {});
1211 m.def("quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor", {});
1212 m.def("quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]", {});
1213 m.def("quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor", {});
1214 m.def("dequantize.self(Tensor self) -> Tensor", {});
1215 m.def("dequantize.tensors(Tensor[] tensors) -> Tensor[]", {});
1216 m.def("q_scale(Tensor self) -> float", {});
1217 m.def("q_zero_point(Tensor self) -> int", {});
1218 m.def("q_per_channel_scales(Tensor self) -> Tensor", {});
1219 m.def("q_per_channel_zero_points(Tensor self) -> Tensor", {});
1220 m.def("q_per_channel_axis(Tensor self) -> int", {});
1221 m.def("int_repr(Tensor self) -> Tensor", {});
1222 m.def("_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor", {});
1223 m.def("_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor", {});
1224 m.def("qscheme(Tensor self) -> QScheme", {});
1225 m.def("fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor", {});
1226 m.def("fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor", {});
1227 m.def("fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", {});
1228 m.def("_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", {});
1229 m.def("fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", {});
1230 m.def("_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor", {});
1231 m.def("_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)", {});
1232 m.def("fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor", {});
1233 m.def("fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", {});
1234 m.def("fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", {});
1235 m.def("_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor", {});
1236 m.def("_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)", {});
1237 m.def("fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor", {});
1238 m.def("_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)", {});
1239 m.def("_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)", {});
1240 m.def("_saturate_weight_to_fp16(Tensor weight) -> Tensor", {});
1241 m.def("choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)", {});
1242 m.def("_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)", {});
1243 m.def("_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)", {});
1244 m.def("_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor", {at::Tag::core});
1245 m.def("to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", {});
1246 m.def("to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", {});
1247 m.def("to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", {});
1248 m.def("to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", {});
1249 m.def("meshgrid(Tensor[] tensors) -> Tensor[]", {});
1250 m.def("meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]", {});
1251 m.def("cartesian_prod(Tensor[] tensors) -> Tensor", {});
1252 m.def("combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor", {});
1253 m.def("item(Tensor self) -> Scalar", {at::Tag::data_dependent_output});
1254 m.def("result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType", {});
1255 m.def("result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType", {});
1256 m.def("result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType", {});
1257 m.def("result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType", {});
1258 m.def("can_cast(ScalarType from, ScalarType to) -> bool", {});
1259 m.def("promote_types(ScalarType type1, ScalarType type2) -> ScalarType", {});
1260 m.def("_local_scalar_dense(Tensor self) -> Scalar", {at::Tag::data_dependent_output});
1261 m.def("_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", {});
1262 m.def("lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])", {});
1263 m.def("_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)", {});
1264 m.def("_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)", {});
1265 m.def("_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", {});
1266 m.def("_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", {});
1267 m.def("_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)", {});
1268 m.def("_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", {});
1269 m.def("_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", {});
1270 m.def("lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)", {});
1271 m.def("lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)", {});
1272 m.def("gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", {});
1273 m.def("gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", {});
1274 m.def("rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", {});
1275 m.def("rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", {});
1276 m.def("rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", {});
1277 m.def("rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", {});
1278 m.def("lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)", {});
1279 m.def("gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", {});
1280 m.def("rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", {});
1281 m.def("rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", {});
1282 m.def("quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)", {});
1283 m.def("quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", {});
1284 m.def("quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", {});
1285 m.def("quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", {});
1286 m.def("_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)", {});
1287 m.def("_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor", {});
1288 m.def("_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)", {});
1289 m.def("set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)", {});
1290 m.def("set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)", {});
1291 m.def("set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)", {});
1292 m.def("set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)", {});
1293 m.def("set_(Tensor(a!) self) -> Tensor(a!)", {});
1294 m.def("lift(Tensor self) -> Tensor", {});
1295 m.def("lift_fresh(Tensor(a) self) -> Tensor(a)", {});
1296 m.def("lift_fresh_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
1297 m.def("is_set_to(Tensor self, Tensor tensor) -> bool", {});
1298 m.def("masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)", {});
1299 m.def("masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor", {at::Tag::pointwise});
1300 m.def("masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)", {});
1301 m.def("masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor", {});
1302 m.def("masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)", {});
1303 m.def("masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor", {});
1304 m.def("_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor", {});
1305 m.def("_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor", {});
1306 m.def("view(Tensor(a) self, SymInt[] size) -> Tensor(a)", {at::Tag::core});
1307 m.def("view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)", {});
1308 m.def("put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)", {});
1309 m.def("put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor", {});
1310 m.def("index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
1311 m.def("index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)", {});
1312 m.def("index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", {});
1313 m.def("index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", {});
1314 m.def("index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", {});
1315 m.def("index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)", {});
1316 m.def("index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor", {});
1317 m.def("index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", {});
1318 m.def("index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", {});
1319 m.def("index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)", {});
1320 m.def("index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor", {});
1321 m.def("index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)", {});
1322 m.def("index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)", {});
1323 m.def("index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", {});
1324 m.def("index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor", {});
1325 m.def("scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", {});
1326 m.def("scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", {});
1327 m.def("scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", {});
1328 m.def("scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", {});
1329 m.def("scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", {});
1330 m.def("scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", {});
1331 m.def("scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor", {});
1332 m.def("scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)", {});
1333 m.def("scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)", {});
1334 m.def("scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor", {});
1335 m.def("scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)", {});
1336 m.def("scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)", {});
1337 m.def("scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", {});
1338 m.def("scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", {});
1339 m.def("scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", {at::Tag::core});
1340 m.def("scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", {});
1341 m.def("scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", {});
1342 m.def("scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", {});
1343 m.def("scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor", {at::Tag::core});
1344 m.def("scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)", {});
1345 m.def("scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", {});
1346 m.def("eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1347 m.def("eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1348 m.def("bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1349 m.def("bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1350 m.def("bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1351 m.def("bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
1352 m.def("bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1353 m.def("bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1354 m.def("bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1355 m.def("__and__.Scalar(Tensor self, Scalar other) -> Tensor", {});
1356 m.def("__and__.Tensor(Tensor self, Tensor other) -> Tensor", {});
1357 m.def("__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1358 m.def("__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1359 m.def("bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1360 m.def("bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1361 m.def("bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1362 m.def("bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
1363 m.def("bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1364 m.def("bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1365 m.def("bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1366 m.def("__or__.Scalar(Tensor self, Scalar other) -> Tensor", {});
1367 m.def("__or__.Tensor(Tensor self, Tensor other) -> Tensor", {});
1368 m.def("__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1369 m.def("__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1370 m.def("bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1371 m.def("bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1372 m.def("bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1373 m.def("bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
1374 m.def("bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1375 m.def("bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1376 m.def("bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1377 m.def("__xor__.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1378 m.def("__xor__.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1379 m.def("__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1380 m.def("__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1381 m.def("__lshift__.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1382 m.def("__lshift__.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1383 m.def("__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1384 m.def("__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1385 m.def("bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1386 m.def("bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1387 m.def("bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1388 m.def("bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1389 m.def("bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1390 m.def("bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1391 m.def("bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
1392 m.def("__rshift__.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1393 m.def("__rshift__.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1394 m.def("__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1395 m.def("__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1396 m.def("bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1397 m.def("bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1398 m.def("bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1399 m.def("bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1400 m.def("bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1401 m.def("bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1402 m.def("bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
1403 m.def("tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", {});
1404 m.def("triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", {});
1405 m.def("digamma_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
1406 m.def("lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)", {at::Tag::pointwise});
1407 m.def("lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)", {at::Tag::pointwise});
1408 m.def("addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", {});
1409 m.def("addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {});
1410 m.def("addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", {});
1411 m.def("random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1412 m.def("random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1413 m.def("random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1414 m.def("uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1415 m.def("cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1416 m.def("log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1417 m.def("exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1418 m.def("geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1419 m.def("diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", {});
1420 m.def("diag(Tensor self, int diagonal=0) -> Tensor", {});
1421 m.def("cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", {});
1422 m.def("cross(Tensor self, Tensor other, int? dim=None) -> Tensor", {});
1423 m.def("triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", {});
1424 m.def("triu(Tensor self, int diagonal=0) -> Tensor", {});
1425 m.def("tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", {});
1426 m.def("tril(Tensor self, int diagonal=0) -> Tensor", {});
1427 m.def("tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1428 m.def("triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
1429 m.def("trace(Tensor self) -> Tensor", {});
1430 m.def("trace_backward(Tensor grad, SymInt[] sizes) -> Tensor", {});
1431 m.def("ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1432 m.def("ne.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1433 m.def("ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1434 m.def("ne.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1435 m.def("ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1436 m.def("ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1437 m.def("not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {});
1438 m.def("not_equal.Scalar(Tensor self, Scalar other) -> Tensor", {});
1439 m.def("not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
1440 m.def("not_equal.Tensor(Tensor self, Tensor other) -> Tensor", {});
1441 m.def("not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1442 m.def("not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1443 m.def("eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1444 m.def("eq.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1445 m.def("eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1446 m.def("eq.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1447 m.def("ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1448 m.def("ge.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1449 m.def("ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1450 m.def("ge.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1451 m.def("ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1452 m.def("ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1453 m.def("greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {});
1454 m.def("greater_equal.Scalar(Tensor self, Scalar other) -> Tensor", {});
1455 m.def("greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
1456 m.def("greater_equal.Tensor(Tensor self, Tensor other) -> Tensor", {});
1457 m.def("greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1458 m.def("greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1459 m.def("le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1460 m.def("le.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1461 m.def("le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1462 m.def("le.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1463 m.def("le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1464 m.def("le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1465 m.def("less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {});
1466 m.def("less_equal.Scalar(Tensor self, Scalar other) -> Tensor", {});
1467 m.def("less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
1468 m.def("less_equal.Tensor(Tensor self, Tensor other) -> Tensor", {});
1469 m.def("less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1470 m.def("less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1471 m.def("gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1472 m.def("gt.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1473 m.def("gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1474 m.def("gt.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1475 m.def("gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1476 m.def("gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1477 m.def("greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {});
1478 m.def("greater.Scalar(Tensor self, Scalar other) -> Tensor", {});
1479 m.def("greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
1480 m.def("greater.Tensor(Tensor self, Tensor other) -> Tensor", {});
1481 m.def("greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1482 m.def("greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1483 m.def("lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1484 m.def("lt.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1485 m.def("lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1486 m.def("lt.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1487 m.def("lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1488 m.def("lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1489 m.def("less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {});
1490 m.def("less.Scalar(Tensor self, Scalar other) -> Tensor", {});
1491 m.def("less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
1492 m.def("less.Tensor(Tensor self, Tensor other) -> Tensor", {});
1493 m.def("less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {});
1494 m.def("less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1495 m.def("take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", {});
1496 m.def("take(Tensor self, Tensor index) -> Tensor", {});
1497 m.def("take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", {});
1498 m.def("take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor", {});
1499 m.def("index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", {});
1500 m.def("index_select(Tensor self, int dim, Tensor index) -> Tensor", {at::Tag::core});
1501 m.def("index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", {});
1502 m.def("index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor", {});
1503 m.def("index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor", {});
1504 m.def("masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::dynamic_output_shape});
1505 m.def("masked_select(Tensor self, Tensor mask) -> Tensor", {at::Tag::dynamic_output_shape});
1506 m.def("masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor", {});
1507 m.def("nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::dynamic_output_shape});
1508 m.def("nonzero(Tensor self) -> Tensor", {at::Tag::core, at::Tag::dynamic_output_shape});
1509 m.def("nonzero_numpy(Tensor self) -> Tensor[]", {});
1510 m.def("argwhere(Tensor self) -> Tensor", {at::Tag::dynamic_output_shape});
1511 m.def("gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)", {});
1512 m.def("gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor", {at::Tag::core});
1513 m.def("gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor", {});
1514 m.def("gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)", {});
1515 m.def("gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor", {});
1516 m.def("_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor", {});
1517 m.def("addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1518 m.def("addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", {at::Tag::pointwise});
1519 m.def("addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", {at::Tag::pointwise});
1520 m.def("addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1521 m.def("addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", {at::Tag::pointwise});
1522 m.def("addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", {at::Tag::pointwise});
1523 m.def("cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor", {});
1524 m.def("triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)", {});
1525 m.def("triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)", {});
1526 m.def("_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()", {});
1527 m.def("linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)", {});
1528 m.def("linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor", {});
1529 m.def("linalg_vander(Tensor x, *, int? N=None) -> Tensor", {});
1530 m.def("svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)", {});
1531 m.def("svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)", {});
1532 m.def("swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)", {});
1533 m.def("swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)", {at::Tag::inplace_view});
1534 m.def("swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", {});
1535 m.def("swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", {at::Tag::inplace_view});
1536 m.def("cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1537 m.def("cholesky(Tensor self, bool upper=False) -> Tensor", {});
1538 m.def("cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1539 m.def("cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor", {});
1540 m.def("_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor", {});
1541 m.def("cholesky_inverse(Tensor self, bool upper=False) -> Tensor", {});
1542 m.def("cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1543 m.def("qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)", {});
1544 m.def("qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)", {});
1545 m.def("geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)", {});
1546 m.def("geqrf(Tensor self) -> (Tensor a, Tensor tau)", {});
1547 m.def("orgqr(Tensor self, Tensor input2) -> Tensor", {});
1548 m.def("orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)", {});
1549 m.def("ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)", {});
1550 m.def("ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor", {});
1551 m.def("_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)", {});
1552 m.def("lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)", {});
1553 m.def("lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor", {});
1554 m.def("lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)", {});
1555 m.def("lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)", {});
1556 m.def("multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1557 m.def("multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1558 m.def("lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1559 m.def("lgamma_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
1560 m.def("lgamma(Tensor self) -> Tensor", {at::Tag::pointwise});
1561 m.def("digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1562 m.def("digamma(Tensor self) -> Tensor", {at::Tag::pointwise});
1563 m.def("polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1564 m.def("polygamma(int n, Tensor self) -> Tensor", {at::Tag::pointwise});
1565 m.def("polygamma_(Tensor(a!) self, int n) -> Tensor(a!)", {at::Tag::pointwise});
1566 m.def("erfinv(Tensor self) -> Tensor", {at::Tag::pointwise});
1567 m.def("erfinv_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
1568 m.def("erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1569 m.def("i0(Tensor self) -> Tensor", {at::Tag::pointwise});
1570 m.def("i0_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
1571 m.def("i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1572 m.def("sign(Tensor self) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1573 m.def("sign_(Tensor(a!) self) -> Tensor(a!)", {at::Tag::pointwise});
1574 m.def("sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1575 m.def("signbit(Tensor self) -> Tensor", {at::Tag::pointwise});
1576 m.def("signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1577 m.def("dist(Tensor self, Tensor other, Scalar p=2) -> Tensor", {});
1578 m.def("atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1579 m.def("atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1580 m.def("atan2(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1581 m.def("arctan2(Tensor self, Tensor other) -> Tensor", {});
1582 m.def("arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
1583 m.def("arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {});
1584 m.def("lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1585 m.def("lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1586 m.def("lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor", {at::Tag::pointwise});
1587 m.def("lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor", {at::Tag::pointwise});
1588 m.def("histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)", {});
1589 m.def("histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor", {});
1590 m.def("histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)", {});
1591 m.def("histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)", {});
1592 m.def("histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)", {});
1593 m.def("histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)", {});
1594 m.def("_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]", {});
1595 m.def("_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor", {});
1596 m.def("_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor", {});
1597 m.def("histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", {});
1598 m.def("histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", {});
1599 m.def("histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", {});
1600 m.def("fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1601 m.def("fmod.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1602 m.def("fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1603 m.def("fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1604 m.def("fmod.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1605 m.def("fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1606 m.def("hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1607 m.def("hypot(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1608 m.def("hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1609 m.def("igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1610 m.def("igamma(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1611 m.def("igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1612 m.def("igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1613 m.def("igammac(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1614 m.def("igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1615 m.def("nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1616 m.def("nextafter(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1617 m.def("nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1618 m.def("remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1619 m.def("remainder.Scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
1620 m.def("remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", {at::Tag::pointwise});
1621 m.def("remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1622 m.def("remainder.Tensor(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1623 m.def("remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", {at::Tag::pointwise});
1624 m.def("remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
1625 m.def("min(Tensor self) -> Tensor", {});
1626 m.def("fmin(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1627 m.def("fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1628 m.def("max(Tensor self) -> Tensor", {});
1629 m.def("fmax(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1630 m.def("fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1631 m.def("maximum(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1632 m.def("maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1633 m.def("max.other(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1634 m.def("max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1635 m.def("max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
1636 m.def("minimum(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1637 m.def("minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1638 m.def("min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1639 m.def("min.other(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
1640 m.def("quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", {});
1641 m.def("quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", {});
1642 m.def("quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", {});
1643 m.def("quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", {});
1644 m.def("nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", {});
1645 m.def("nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", {});
1646 m.def("nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", {});
1647 m.def("nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", {});
1648 m.def("sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
1649 m.def("sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
1650 m.def("sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", {});
1651 m.def("sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", {});
1652 m.def("sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
1653 m.def("sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
1654 m.def("sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", {});
1655 m.def("sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", {});
1656 m.def("msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
1657 m.def("msort(Tensor self) -> Tensor", {});
1658 m.def("argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor", {});
1659 m.def("argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor", {});
1660 m.def("argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor", {});
1661 m.def("topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", {});
1662 m.def("topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)", {at::Tag::core});
1663 m.def("all(Tensor self) -> Tensor", {});
1664 m.def("all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
1665 m.def("any(Tensor self) -> Tensor", {});
1666 m.def("any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
1667 m.def("renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)", {});
1668 m.def("renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor", {});
1669 m.def("renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)", {});
1670 m.def("unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)", {});
1671 m.def("unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor", {});
1672 m.def("equal(Tensor self, Tensor other) -> bool", {at::Tag::pointwise, at::Tag::data_dependent_output});
1673 m.def("pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1674 m.def("pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1675 m.def("pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1676 m.def("pow.Scalar(Scalar self, Tensor exponent) -> Tensor", {at::Tag::pointwise});
1677 m.def("pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1678 m.def("pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", {at::Tag::pointwise, at::Tag::core});
1679 m.def("pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", {at::Tag::pointwise});
1680 m.def("pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", {at::Tag::pointwise});
1681 m.def("float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1682 m.def("float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", {at::Tag::pointwise});
1683 m.def("float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1684 m.def("float_power.Scalar(Scalar self, Tensor exponent) -> Tensor", {at::Tag::pointwise});
1685 m.def("float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
1686 m.def("float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", {at::Tag::pointwise});
1687 m.def("float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", {at::Tag::pointwise});
1688 m.def("float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", {at::Tag::pointwise});
1689 m.def("normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1690 m.def("normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1691 m.def("normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1692 m.def("normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1693 m.def("normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1694 m.def("normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1695 m.def("normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1696 m.def("normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1697 m.def("normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1698 m.def("normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1699 m.def("alias(Tensor(a) self) -> Tensor(a)", {at::Tag::core});
1700 m.def("_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()", {});
1701 m.def("_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)", {});
1702 m.def("_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", {});
1703 m.def("_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", {});
1704 m.def("_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", {});
1705 m.def("_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", {});
1706 m.def("_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", {});
1707 m.def("_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", {});
1708 m.def("_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", {});
1709 m.def("_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", {});
1710 m.def("_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", {});
1711 m.def("_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", {});
1712 m.def("_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", {});
1713 m.def("_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", {});
1714 m.def("_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", {});
1715 m.def("_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", {});
1716 m.def("_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", {});
1717 m.def("_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", {});
1718 m.def("_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]", {});
1719 m.def("_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()", {});
1720 m.def("_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]", {});
1721 m.def("_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()", {});
1722 m.def("_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]", {});
1723 m.def("_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()", {});
1724 m.def("_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]", {});
1725 m.def("_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()", {});
1726 m.def("_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]", {});
1727 m.def("_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()", {});
1728 m.def("_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]", {});
1729 m.def("_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()", {});
1730 m.def("_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]", {});
1731 m.def("_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()", {});
1732 m.def("_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]", {});
1733 m.def("_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()", {});
1734 m.def("_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", {});
1735 m.def("_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", {});
1736 m.def("_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", {});
1737 m.def("_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", {});
1738 m.def("_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", {});
1739 m.def("_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", {});
1740 m.def("_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", {});
1741 m.def("_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", {});
1742 m.def("_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", {});
1743 m.def("_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", {});
1744 m.def("_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", {});
1745 m.def("_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", {});
1746 m.def("_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", {});
1747 m.def("_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", {});
1748 m.def("_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", {});
1749 m.def("_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", {});
1750 m.def("_foreach_exp(Tensor[] self) -> Tensor[]", {});
1751 m.def("_foreach_zero_(Tensor(a!)[] self) -> ()", {});
1752 m.def("_foreach_exp_(Tensor(a!)[] self) -> ()", {});
1753 m.def("_foreach_sqrt(Tensor[] self) -> Tensor[]", {});
1754 m.def("_foreach_sqrt_(Tensor(a!)[] self) -> ()", {});
1755 m.def("_foreach_abs(Tensor[] self) -> Tensor[]", {});
1756 m.def("_foreach_abs_(Tensor(a!)[] self) -> ()", {});
1757 m.def("_foreach_acos(Tensor[] self) -> Tensor[]", {});
1758 m.def("_foreach_acos_(Tensor(a!)[] self) -> ()", {});
1759 m.def("_foreach_asin(Tensor[] self) -> Tensor[]", {});
1760 m.def("_foreach_asin_(Tensor(a!)[] self) -> ()", {});
1761 m.def("_foreach_atan(Tensor[] self) -> Tensor[]", {});
1762 m.def("_foreach_atan_(Tensor(a!)[] self) -> ()", {});
1763 m.def("_foreach_ceil(Tensor[] self) -> Tensor[]", {});
1764 m.def("_foreach_ceil_(Tensor(a!)[] self) -> ()", {});
1765 m.def("_foreach_cos(Tensor[] self) -> Tensor[]", {});
1766 m.def("_foreach_cos_(Tensor(a!)[] self) -> ()", {});
1767 m.def("_foreach_cosh(Tensor[] self) -> Tensor[]", {});
1768 m.def("_foreach_cosh_(Tensor(a!)[] self) -> ()", {});
1769 m.def("_foreach_erf(Tensor[] self) -> Tensor[]", {});
1770 m.def("_foreach_erf_(Tensor(a!)[] self) -> ()", {});
1771 m.def("_foreach_erfc(Tensor[] self) -> Tensor[]", {});
1772 m.def("_foreach_erfc_(Tensor(a!)[] self) -> ()", {});
1773 m.def("_foreach_expm1(Tensor[] self) -> Tensor[]", {});
1774 m.def("_foreach_expm1_(Tensor(a!)[] self) -> ()", {});
1775 m.def("_foreach_floor(Tensor[] self) -> Tensor[]", {});
1776 m.def("_foreach_floor_(Tensor(a!)[] self) -> ()", {});
1777 m.def("_foreach_log(Tensor[] self) -> Tensor[]", {});
1778 m.def("_foreach_log_(Tensor(a!)[] self) -> ()", {});
1779 m.def("_foreach_log10(Tensor[] self) -> Tensor[]", {});
1780 m.def("_foreach_log10_(Tensor(a!)[] self) -> ()", {});
1781 m.def("_foreach_log1p(Tensor[] self) -> Tensor[]", {});
1782 m.def("_foreach_log1p_(Tensor(a!)[] self) -> ()", {});
1783 m.def("_foreach_log2(Tensor[] self) -> Tensor[]", {});
1784 m.def("_foreach_log2_(Tensor(a!)[] self) -> ()", {});
1785 m.def("_foreach_neg(Tensor[] self) -> Tensor[]", {});
1786 m.def("_foreach_neg_(Tensor(a!)[] self) -> ()", {});
1787 m.def("_foreach_tan(Tensor[] self) -> Tensor[]", {});
1788 m.def("_foreach_tan_(Tensor(a!)[] self) -> ()", {});
1789 m.def("_foreach_tanh(Tensor[] self) -> Tensor[]", {});
1790 m.def("_foreach_tanh_(Tensor(a!)[] self) -> ()", {});
1791 m.def("_foreach_sin(Tensor[] self) -> Tensor[]", {});
1792 m.def("_foreach_sin_(Tensor(a!)[] self) -> ()", {});
1793 m.def("_foreach_sinh(Tensor[] self) -> Tensor[]", {});
1794 m.def("_foreach_sinh_(Tensor(a!)[] self) -> ()", {});
1795 m.def("_foreach_round(Tensor[] self) -> Tensor[]", {});
1796 m.def("_foreach_round_(Tensor(a!)[] self) -> ()", {});
1797 m.def("_foreach_lgamma(Tensor[] self) -> Tensor[]", {});
1798 m.def("_foreach_lgamma_(Tensor(a!)[] self) -> ()", {});
1799 m.def("_foreach_frac(Tensor[] self) -> Tensor[]", {});
1800 m.def("_foreach_frac_(Tensor(a!)[] self) -> ()", {});
1801 m.def("_foreach_reciprocal(Tensor[] self) -> Tensor[]", {});
1802 m.def("_foreach_reciprocal_(Tensor(a!)[] self) -> ()", {});
1803 m.def("_foreach_sigmoid(Tensor[] self) -> Tensor[]", {});
1804 m.def("_foreach_sigmoid_(Tensor(a!)[] self) -> ()", {});
1805 m.def("_foreach_trunc(Tensor[] self) -> Tensor[]", {});
1806 m.def("_foreach_trunc_(Tensor(a!)[] self) -> ()", {});
1807 m.def("_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()", {});
1808 m.def("_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()", {});
1809 m.def("_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()", {});
1810 m.def("_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()", {});
1811 m.def("_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()", {});
1812 m.def("_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()", {});
1813 m.def("_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]", {});
1814 m.def("_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]", {});
1815 m.def("_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]", {});
1816 m.def("_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]", {});
1817 m.def("_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]", {});
1818 m.def("_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]", {});
1819 m.def("_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]", {});
1820 m.def("_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]", {});
1821 m.def("_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()", {});
1822 m.def("_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]", {});
1823 m.def("_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()", {});
1824 m.def("bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", {});
1825 m.def("bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)", {});
1826 m.def("bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", {});
1827 m.def("searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor", {});
1828 m.def("searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", {});
1829 m.def("searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor", {});
1830 m.def("_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor", {});
1831 m.def("_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)", {});
1832 m.def("_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor", {});
1833 m.def("_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)", {});
1834 m.def("mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", {});
1835 m.def("mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", {});
1836 m.def("mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1837 m.def("mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", {});
1838 m.def("l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", {});
1839 m.def("multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", {});
1840 m.def("multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor", {});
1841 m.def("multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1842 m.def("multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor", {});
1843 m.def("multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", {});
1844 m.def("multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", {});
1845 m.def("multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))", {});
1846 m.def("multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)", {});
1847 m.def("multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1848 m.def("multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor", {});
1849 m.def("nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)", {});
1850 m.def("nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", {});
1851 m.def("nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", {});
1852 m.def("nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))", {});
1853 m.def("nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)", {});
1854 m.def("nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1855 m.def("nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor", {});
1856 m.def("nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)", {});
1857 m.def("nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", {});
1858 m.def("nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))", {});
1859 m.def("nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)", {});
1860 m.def("nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1861 m.def("nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor", {});
1862 m.def("smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)", {});
1863 m.def("smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor", {});
1864 m.def("smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1865 m.def("smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor", {});
1866 m.def("huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)", {});
1867 m.def("huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor", {});
1868 m.def("huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1869 m.def("huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor", {});
1870 m.def("soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", {});
1871 m.def("soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", {});
1872 m.def("soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1873 m.def("soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", {});
1874 m.def("elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)", {});
1875 m.def("elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor", {});
1876 m.def("elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1877 m.def("elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor", {});
1878 m.def("elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)", {});
1879 m.def("glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)", {});
1880 m.def("glu(Tensor self, int dim=-1) -> Tensor", {});
1881 m.def("glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1882 m.def("glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor", {});
1883 m.def("glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor", {});
1884 m.def("glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor", {});
1885 m.def("hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
1886 m.def("hardsigmoid(Tensor self) -> Tensor", {});
1887 m.def("hardsigmoid_(Tensor(a!) self) -> Tensor(a!)", {});
1888 m.def("hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1889 m.def("hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor", {});
1890 m.def("hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)", {});
1891 m.def("hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor", {at::Tag::core});
1892 m.def("hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1893 m.def("hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor", {});
1894 m.def("hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)", {});
1895 m.def("hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
1896 m.def("hardswish(Tensor self) -> Tensor", {});
1897 m.def("hardswish_(Tensor(a!) self) -> Tensor(a!)", {});
1898 m.def("hardswish_backward(Tensor grad_output, Tensor self) -> Tensor", {});
1899 m.def("leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)", {});
1900 m.def("leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor", {at::Tag::core});
1901 m.def("leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1902 m.def("leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor", {});
1903 m.def("leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)", {});
1904 m.def("log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
1905 m.def("log_sigmoid(Tensor self) -> Tensor", {});
1906 m.def("log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))", {});
1907 m.def("log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)", {});
1908 m.def("log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1909 m.def("log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor", {});
1910 m.def("rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1911 m.def("rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor", {at::Tag::nondeterministic_seeded});
1912 m.def("rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor", {});
1913 m.def("rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)", {at::Tag::nondeterministic_seeded});
1914 m.def("softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)", {});
1915 m.def("softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor", {});
1916 m.def("softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1917 m.def("softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor", {});
1918 m.def("softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)", {});
1919 m.def("softshrink(Tensor self, Scalar lambd=0.5) -> Tensor", {});
1920 m.def("softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1921 m.def("softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor", {});
1922 m.def("adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", {});
1923 m.def("adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", {});
1924 m.def("mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor", {});
1925 m.def("mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", {});
1926 m.def("mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor", {});
1927 m.def("_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", {at::Tag::core});
1928 m.def("_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor", {at::Tag::core});
1929 m.def("adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", {});
1930 m.def("adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor", {});
1931 m.def("_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor", {});
1932 m.def("adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1933 m.def("_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor", {});
1934 m.def("adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", {});
1935 m.def("adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)", {});
1936 m.def("adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1937 m.def("adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor", {});
1938 m.def("adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", {});
1939 m.def("adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)", {});
1940 m.def("adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1941 m.def("adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor", {});
1942 m.def("avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", {});
1943 m.def("avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", {at::Tag::core});
1944 m.def("avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1945 m.def("avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor", {at::Tag::core});
1946 m.def("avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", {});
1947 m.def("avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", {});
1948 m.def("avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1949 m.def("avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor", {});
1950 m.def("fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", {});
1951 m.def("fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)", {});
1952 m.def("fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1953 m.def("fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor", {});
1954 m.def("fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", {});
1955 m.def("fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)", {});
1956 m.def("fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1957 m.def("fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor", {});
1958 m.def("max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", {});
1959 m.def("max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", {at::Tag::core});
1960 m.def("max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1961 m.def("max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor", {at::Tag::core});
1962 m.def("max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", {});
1963 m.def("max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", {at::Tag::core});
1964 m.def("max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1965 m.def("max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor", {});
1966 m.def("max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", {});
1967 m.def("max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor", {});
1968 m.def("max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)", {});
1969 m.def("max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor", {});
1970 m.def("reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)", {});
1971 m.def("reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor", {});
1972 m.def("reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1973 m.def("reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor", {});
1974 m.def("reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)", {});
1975 m.def("reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor", {at::Tag::core});
1976 m.def("reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1977 m.def("reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor", {});
1978 m.def("reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)", {});
1979 m.def("reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor", {});
1980 m.def("reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1981 m.def("reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor", {});
1982 m.def("replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)", {});
1983 m.def("replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor", {});
1984 m.def("replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1985 m.def("replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor", {});
1986 m.def("replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)", {});
1987 m.def("replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor", {at::Tag::core});
1988 m.def("replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1989 m.def("replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor", {});
1990 m.def("replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)", {});
1991 m.def("replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor", {at::Tag::core});
1992 m.def("replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
1993 m.def("replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor", {});
1994 m.def("_pad_circular(Tensor self, SymInt[] pad) -> Tensor", {});
1995 m.def("_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor", {});
1996 m.def("pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor", {});
1997 m.def("upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", {});
1998 m.def("upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", {at::Tag::core});
1999 m.def("_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", {});
2000 m.def("upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", {});
2001 m.def("upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", {});
2002 m.def("_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", {});
2003 m.def("upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", {});
2004 m.def("_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", {});
2005 m.def("upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", {at::Tag::core});
2006 m.def("_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", {});
2007 m.def("upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", {});
2008 m.def("_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", {});
2009 m.def("upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2010 m.def("upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor", {});
2011 m.def("upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2012 m.def("upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor", {});
2013 m.def("upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2014 m.def("upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2015 m.def("upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2016 m.def("upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2017 m.def("_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2018 m.def("_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2019 m.def("_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2020 m.def("_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2021 m.def("upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2022 m.def("upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2023 m.def("upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2024 m.def("upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2025 m.def("_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2026 m.def("_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2027 m.def("_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2028 m.def("_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2029 m.def("upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2030 m.def("upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2031 m.def("upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2032 m.def("upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2033 m.def("upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2034 m.def("_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2035 m.def("upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", {});
2036 m.def("_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", {});
2037 m.def("upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2038 m.def("_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2039 m.def("upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor", {});
2040 m.def("_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor", {});
2041 m.def("upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2042 m.def("_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2043 m.def("upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2044 m.def("_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2045 m.def("upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2046 m.def("_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2047 m.def("upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2048 m.def("_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2049 m.def("upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2050 m.def("_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2051 m.def("upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2052 m.def("_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2053 m.def("upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2054 m.def("_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {});
2055 m.def("upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2056 m.def("_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", {});
2057 m.def("sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)", {at::Tag::pointwise});
2058 m.def("sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor", {at::Tag::pointwise});
2059 m.def("logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)", {at::Tag::pointwise});
2060 m.def("logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor", {at::Tag::pointwise});
2061 m.def("tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)", {at::Tag::pointwise});
2062 m.def("tanh_backward(Tensor grad_output, Tensor output) -> Tensor", {at::Tag::pointwise});
2063 m.def("slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", {});
2064 m.def("slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor", {});
2065 m.def("slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", {});
2066 m.def("slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor", {});
2067 m.def("thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)", {});
2068 m.def("thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor", {});
2069 m.def("_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)", {});
2070 m.def("_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor", {});
2071 m.def("_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {});
2072 m.def("_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", {});
2073 m.def("_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)", {});
2074 m.def("_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor", {});
2075 m.def("conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor", {});
2076 m.def("slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)", {});
2077 m.def("slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor", {});
2078 m.def("slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)", {});
2079 m.def("slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor", {});
2080 m.def("slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor", {});
2081 m.def("slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor", {});
2082 m.def("col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", {});
2083 m.def("col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", {at::Tag::core});
2084 m.def("column_stack(Tensor[] tensors) -> Tensor", {});
2085 m.def("column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", {});
2086 m.def("im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", {});
2087 m.def("im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", {});
2088 m.def("isfinite(Tensor self) -> Tensor", {});
2089 m.def("isinf(Tensor self) -> Tensor", {at::Tag::core});
2090 m.def("record_stream(Tensor(a!) self, Stream s) -> ()", {});
2091 m.def("isposinf(Tensor self) -> Tensor", {at::Tag::pointwise});
2092 m.def("isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2093 m.def("isneginf(Tensor self) -> Tensor", {at::Tag::pointwise});
2094 m.def("isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2095 m.def("_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor", {});
2096 m.def("_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor", {});
2097 m.def("special_entr(Tensor self) -> Tensor", {at::Tag::pointwise});
2098 m.def("special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2099 m.def("special_ndtri(Tensor self) -> Tensor", {at::Tag::pointwise});
2100 m.def("special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2101 m.def("special_log_ndtr(Tensor self) -> Tensor", {at::Tag::pointwise});
2102 m.def("special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2103 m.def("special_expm1(Tensor self) -> Tensor", {});
2104 m.def("special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2105 m.def("special_exp2(Tensor self) -> Tensor", {});
2106 m.def("special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2107 m.def("special_psi(Tensor self) -> Tensor", {});
2108 m.def("special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2109 m.def("special_digamma(Tensor self) -> Tensor", {});
2110 m.def("special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2111 m.def("special_gammaln(Tensor self) -> Tensor", {});
2112 m.def("special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2113 m.def("special_erf(Tensor self) -> Tensor", {});
2114 m.def("special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2115 m.def("special_erfc(Tensor self) -> Tensor", {});
2116 m.def("special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2117 m.def("special_erfcx(Tensor self) -> Tensor", {at::Tag::pointwise});
2118 m.def("special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2119 m.def("special_erfinv(Tensor self) -> Tensor", {});
2120 m.def("special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2121 m.def("special_ndtr(Tensor self) -> Tensor", {});
2122 m.def("special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2123 m.def("special_xlog1py(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
2124 m.def("special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
2125 m.def("special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
2126 m.def("special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2127 m.def("special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2128 m.def("special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2129 m.def("special_xlogy(Tensor self, Tensor other) -> Tensor", {});
2130 m.def("special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor", {});
2131 m.def("special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor", {});
2132 m.def("special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
2133 m.def("special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
2134 m.def("special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {});
2135 m.def("special_zeta(Tensor self, Tensor other) -> Tensor", {at::Tag::pointwise});
2136 m.def("special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor", {at::Tag::pointwise});
2137 m.def("special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor", {at::Tag::pointwise});
2138 m.def("special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2139 m.def("special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2140 m.def("special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2141 m.def("special_i0(Tensor self) -> Tensor", {});
2142 m.def("special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2143 m.def("special_i0e(Tensor self) -> Tensor", {at::Tag::pointwise});
2144 m.def("special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2145 m.def("special_i1(Tensor self) -> Tensor", {at::Tag::pointwise});
2146 m.def("special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2147 m.def("special_i1e(Tensor self) -> Tensor", {at::Tag::pointwise});
2148 m.def("special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2149 m.def("special_logit(Tensor self, float? eps=None) -> Tensor", {});
2150 m.def("special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2151 m.def("special_polygamma(int n, Tensor self) -> Tensor", {});
2152 m.def("special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2153 m.def("special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", {});
2154 m.def("special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", {});
2155 m.def("special_expit(Tensor self) -> Tensor", {});
2156 m.def("special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2157 m.def("special_sinc(Tensor self) -> Tensor", {});
2158 m.def("special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2159 m.def("special_round(Tensor self, *, int decimals=0) -> Tensor", {});
2160 m.def("special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)", {});
2161 m.def("special_log1p(Tensor self) -> Tensor", {});
2162 m.def("special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2163 m.def("special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", {});
2164 m.def("special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
2165 m.def("special_gammainc(Tensor self, Tensor other) -> Tensor", {});
2166 m.def("special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
2167 m.def("special_gammaincc(Tensor self, Tensor other) -> Tensor", {});
2168 m.def("special_multigammaln(Tensor self, int p) -> Tensor", {});
2169 m.def("special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)", {});
2170 m.def("special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", {});
2171 m.def("fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", {});
2172 m.def("fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2173 m.def("fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", {});
2174 m.def("fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2175 m.def("fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", {});
2176 m.def("fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2177 m.def("fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", {});
2178 m.def("fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2179 m.def("fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", {});
2180 m.def("fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2181 m.def("fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", {});
2182 m.def("fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2183 m.def("fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", {});
2184 m.def("fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2185 m.def("fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", {});
2186 m.def("fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2187 m.def("fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", {});
2188 m.def("fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2189 m.def("fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", {});
2190 m.def("fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2191 m.def("fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", {});
2192 m.def("fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2193 m.def("fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", {});
2194 m.def("fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2195 m.def("fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", {});
2196 m.def("fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2197 m.def("fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", {});
2198 m.def("fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2199 m.def("fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", {});
2200 m.def("fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2201 m.def("fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", {});
2202 m.def("fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2203 m.def("fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", {});
2204 m.def("fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2205 m.def("fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", {});
2206 m.def("fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2207 m.def("fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
2208 m.def("fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)", {});
2209 m.def("fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
2210 m.def("fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)", {});
2211 m.def("fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor", {});
2212 m.def("fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor", {});
2213 m.def("linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)", {});
2214 m.def("linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)", {});
2215 m.def("linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor", {});
2216 m.def("linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)", {});
2217 m.def("linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor", {});
2218 m.def("linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)", {});
2219 m.def("linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)", {});
2220 m.def("linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)", {});
2221 m.def("linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)", {});
2222 m.def("linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)", {});
2223 m.def("linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)", {});
2224 m.def("linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)", {});
2225 m.def("linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor", {});
2226 m.def("linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)", {});
2227 m.def("_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)", {});
2228 m.def("_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)", {});
2229 m.def("linalg_det(Tensor A) -> Tensor", {});
2230 m.def("linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)", {});
2231 m.def("det(Tensor self) -> Tensor", {});
2232 m.def("linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)", {});
2233 m.def("linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)", {});
2234 m.def("linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)", {});
2235 m.def("linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)", {});
2236 m.def("linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor", {});
2237 m.def("linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", {});
2238 m.def("linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)", {at::Tag::dynamic_output_shape});
2239 m.def("linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)", {at::Tag::dynamic_output_shape});
2240 m.def("linalg_matmul(Tensor self, Tensor other) -> Tensor", {});
2241 m.def("linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
2242 m.def("linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor", {});
2243 m.def("linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)", {});
2244 m.def("linalg_matrix_exp(Tensor self) -> Tensor", {});
2245 m.def("_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)", {});
2246 m.def("_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)", {});
2247 m.def("linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)", {});
2248 m.def("linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)", {});
2249 m.def("slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)", {});
2250 m.def("slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)", {});
2251 m.def("logdet(Tensor self) -> Tensor", {});
2252 m.def("linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)", {});
2253 m.def("linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", {});
2254 m.def("linalg_eigvals(Tensor self) -> Tensor", {});
2255 m.def("linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2256 m.def("_linalg_eigh(Tensor A, str UPLO=\"L\", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)", {});
2257 m.def("_linalg_eigh.eigenvalues(Tensor A, str UPLO=\"L\", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", {});
2258 m.def("linalg_eigh(Tensor self, str UPLO=\"L\") -> (Tensor eigenvalues, Tensor eigenvectors)", {});
2259 m.def("linalg_eigh.eigvals(Tensor self, str UPLO=\"L\", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", {});
2260 m.def("linalg_eigvalsh(Tensor self, str UPLO=\"L\") -> Tensor", {});
2261 m.def("linalg_eigvalsh.out(Tensor self, str UPLO=\"L\", *, Tensor(a!) out) -> Tensor(a!)", {});
2262 m.def("linalg_householder_product(Tensor input, Tensor tau) -> Tensor", {});
2263 m.def("linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)", {});
2264 m.def("linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)", {});
2265 m.def("linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)", {});
2266 m.def("linalg_inv(Tensor A) -> Tensor", {});
2267 m.def("linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)", {});
2268 m.def("inverse(Tensor self) -> Tensor", {});
2269 m.def("inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {});
2270 m.def("inner(Tensor self, Tensor other) -> Tensor", {});
2271 m.def("inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {});
2272 m.def("outer(Tensor self, Tensor vec2) -> Tensor", {});
2273 m.def("outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)", {});
2274 m.def("ger(Tensor self, Tensor vec2) -> Tensor", {});
2275 m.def("ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)", {});
2276 m.def("linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
2277 m.def("linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
2278 m.def("linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
2279 m.def("linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
2280 m.def("linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
2281 m.def("linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
2282 m.def("linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
2283 m.def("linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
2284 m.def("linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", {});
2285 m.def("linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {});
2286 m.def("_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)", {});
2287 m.def("_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", {});
2288 m.def("linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)", {});
2289 m.def("linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", {});
2290 m.def("linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor", {});
2291 m.def("linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)", {});
2292 m.def("linalg_cond(Tensor self, Scalar? p=None) -> Tensor", {});
2293 m.def("linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2294 m.def("linalg_cond.p_str(Tensor self, str p) -> Tensor", {});
2295 m.def("linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)", {});
2296 m.def("linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor", {});
2297 m.def("linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", {});
2298 m.def("linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor", {});
2299 m.def("linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", {});
2300 m.def("linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor", {});
2301 m.def("linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor", {});
2302 m.def("linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", {});
2303 m.def("linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", {});
2304 m.def("_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)", {});
2305 m.def("_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)", {});
2306 m.def("linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)", {});
2307 m.def("linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)", {});
2308 m.def("linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor", {});
2309 m.def("linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)", {});
2310 m.def("linalg_tensorinv(Tensor self, int ind=2) -> Tensor", {});
2311 m.def("linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)", {});
2312 m.def("linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor", {});
2313 m.def("linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)", {});
2314 m.def("linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)", {});
2315 m.def("linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)", {});
2316 m.def("linalg_matrix_power(Tensor self, int n) -> Tensor", {});
2317 m.def("linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)", {});
2318 m.def("linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor", {});
2319 m.def("linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", {});
2320 m.def("linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor", {});
2321 m.def("linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", {});
2322 m.def("linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor", {});
2323 m.def("linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", {});
2324 m.def("linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor", {});
2325 m.def("linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", {});
2326 m.def("linalg_multi_dot(Tensor[] tensors) -> Tensor", {});
2327 m.def("linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", {});
2328 m.def("nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor", {});
2329 m.def("_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor", {});
2330 m.def("_test_optional_intlist(Tensor values, int[]? addends) -> Tensor", {});
2331 m.def("_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor", {});
2332 m.def("_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor", {});
2333 m.def("_test_string_default(Tensor dummy, str a=\"\\\"'\\\\\", str b='\"\\'\\\\') -> Tensor", {});
2334 m.def("_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor", {});
2335 m.def("_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b=\"2\") -> Tensor", {});
2336 m.def("_test_warn_in_autograd(Tensor self) -> Tensor", {});
2337 m.def("_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor", {});
2338 m.def("_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor", {});
2339 m.def("_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)", {});
2340 m.def("_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2341 m.def("segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor", {});
2342 m.def("_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor", {});
2343 m.def("pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor", {});
2344 m.def("flatten_dense_tensors(Tensor[] tensors) -> Tensor", {});
2345 m.def("unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]", {});
2346 m.def("_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", {});
2347 m.def("_fw_primal_copy(Tensor self, int level) -> Tensor", {at::Tag::view_copy});
2348 m.def("_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor", {at::Tag::view_copy});
2349 m.def("view_as_real_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2350 m.def("view_as_complex_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2351 m.def("_conj_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2352 m.def("_neg_view_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2353 m.def("as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", {at::Tag::view_copy});
2354 m.def("_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor", {at::Tag::view_copy});
2355 m.def("diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor", {at::Tag::view_copy});
2356 m.def("expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor", {at::Tag::view_copy});
2357 m.def("permute_copy(Tensor self, int[] dims) -> Tensor", {at::Tag::view_copy});
2358 m.def("_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor", {at::Tag::view_copy});
2359 m.def("select_copy.int(Tensor self, int dim, SymInt index) -> Tensor", {at::Tag::view_copy});
2360 m.def("detach_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2361 m.def("slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", {at::Tag::view_copy});
2362 m.def("split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]", {at::Tag::view_copy});
2363 m.def("split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]", {at::Tag::view_copy});
2364 m.def("squeeze_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2365 m.def("squeeze_copy.dim(Tensor self, int dim) -> Tensor", {at::Tag::view_copy});
2366 m.def("squeeze_copy.dims(Tensor self, int[] dim) -> Tensor", {at::Tag::view_copy});
2367 m.def("t_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2368 m.def("transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor", {at::Tag::view_copy});
2369 m.def("unsqueeze_copy(Tensor self, int dim) -> Tensor", {at::Tag::view_copy});
2370 m.def("_indices_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2371 m.def("_values_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2372 m.def("indices_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2373 m.def("values_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2374 m.def("crow_indices_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2375 m.def("col_indices_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2376 m.def("ccol_indices_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2377 m.def("row_indices_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2378 m.def("unbind_copy.int(Tensor self, int dim=0) -> Tensor[]", {at::Tag::view_copy});
2379 m.def("unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()", {});
2380 m.def("split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()", {});
2381 m.def("split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", {});
2382 m.def("view_copy(Tensor self, SymInt[] size) -> Tensor", {at::Tag::view_copy});
2383 m.def("view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor", {at::Tag::view_copy});
2384 m.def("unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor", {at::Tag::view_copy});
2385 m.def("alias_copy(Tensor self) -> Tensor", {at::Tag::view_copy});
2386 m.def("to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor", {});
2387 m.def("_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor", {});
2388 m.def("_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor", {});
2389 m.def("_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)", {});
2390 m.def("scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor", {});
2391 m.def("_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)", {});
2392 m.def("_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int", {});
2393 m.def("_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)", {});
2394 m.def("_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor ouput, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)", {});
2395 m.def("_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)", {});
2396 m.def("_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)", {});
2397 m.def("_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)", {});
2398 m.def("_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool", {});
2399 m.def("_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)", {});
2400 m.def("_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)", {});
2401 m.def("_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)", {});
2402 m.def("_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)", {});
2403 m.def("_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor", {});
2404 m.def("_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor", {});
2405 m.def("special_airy_ai(Tensor x) -> Tensor", {at::Tag::pointwise});
2406 m.def("special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2407 m.def("_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)", {});
2408 m.def("_native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor)", {});
2409 m.def("special_bessel_j0(Tensor self) -> Tensor", {at::Tag::pointwise});
2410 m.def("special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2411 m.def("special_bessel_j1(Tensor self) -> Tensor", {at::Tag::pointwise});
2412 m.def("special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2413 m.def("special_bessel_y0(Tensor self) -> Tensor", {at::Tag::pointwise});
2414 m.def("special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2415 m.def("special_bessel_y1(Tensor self) -> Tensor", {at::Tag::pointwise});
2416 m.def("special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2417 m.def("special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2418 m.def("special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2419 m.def("special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2420 m.def("special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2421 m.def("special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2422 m.def("special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2423 m.def("special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2424 m.def("special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2425 m.def("special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2426 m.def("special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2427 m.def("special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2428 m.def("special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2429 m.def("special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2430 m.def("special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2431 m.def("special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2432 m.def("special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2433 m.def("special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2434 m.def("special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2435 m.def("special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2436 m.def("special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2437 m.def("special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2438 m.def("special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2439 m.def("special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2440 m.def("special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2441 m.def("special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2442 m.def("special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2443 m.def("special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2444 m.def("special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2445 m.def("special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2446 m.def("special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2447 m.def("special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2448 m.def("special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2449 m.def("special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2450 m.def("special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2451 m.def("special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2452 m.def("special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2453 m.def("special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2454 m.def("special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2455 m.def("special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2456 m.def("special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2457 m.def("special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2458 m.def("special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2459 m.def("special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2460 m.def("special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2461 m.def("special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2462 m.def("special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2463 m.def("special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2464 m.def("special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2465 m.def("special_modified_bessel_i0(Tensor self) -> Tensor", {at::Tag::pointwise});
2466 m.def("special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2467 m.def("special_modified_bessel_i1(Tensor self) -> Tensor", {at::Tag::pointwise});
2468 m.def("special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2469 m.def("special_modified_bessel_k0(Tensor self) -> Tensor", {at::Tag::pointwise});
2470 m.def("special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2471 m.def("special_modified_bessel_k1(Tensor self) -> Tensor", {at::Tag::pointwise});
2472 m.def("special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2473 m.def("special_scaled_modified_bessel_k0(Tensor x) -> Tensor", {at::Tag::pointwise});
2474 m.def("special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2475 m.def("special_scaled_modified_bessel_k1(Tensor x) -> Tensor", {at::Tag::pointwise});
2476 m.def("special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2477 m.def("special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2478 m.def("special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2479 m.def("special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2480 m.def("special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2481 m.def("special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2482 m.def("special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2483 m.def("special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2484 m.def("special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2485 m.def("special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2486 m.def("special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2487 m.def("special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2488 m.def("special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2489 m.def("special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2490 m.def("special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2491 m.def("special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2492 m.def("special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2493 m.def("special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2494 m.def("special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2495 m.def("special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor", {at::Tag::pointwise});
2496 m.def("special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor", {at::Tag::pointwise});
2497 m.def("special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor", {at::Tag::pointwise});
2498 m.def("special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2499 m.def("special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2500 m.def("special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2501 m.def("special_spherical_bessel_j0(Tensor x) -> Tensor", {at::Tag::pointwise});
2502 m.def("special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::pointwise});
2503 m.def("_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor", {});
2504 m.def("_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", {});
2505 m.def("_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", {});
  m.def("_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", {at::Tag::generated});
  m.def("_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()", {at::Tag::generated});
  m.def("_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", {at::Tag::generated});
  m.def("cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor", {at::Tag::generated});
  m.def("_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", {at::Tag::generated});
  m.def("_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", {at::Tag::generated});
  m.def("_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor", {at::Tag::generated});
  m.def("_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_resize_output(Tensor self, int[] size, Device device) -> Tensor", {at::Tag::generated});
  m.def("empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor", {at::Tag::generated});
  m.def("isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_mps_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mps_max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", {at::Tag::generated});
  m.def("mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))", {at::Tag::generated});
  m.def("miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", {at::Tag::generated});
  m.def("miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()", {at::Tag::generated});
  m.def("_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)", {at::Tag::generated});
  m.def("batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", {at::Tag::generated});
  m.def("batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
  m.def("unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
  m.def("sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
  m.def("_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
  m.def("_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
  m.def("native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor", {at::Tag::generated});
  m.def("resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("resize_as_sparse(Tensor self, Tensor the_template) -> Tensor", {at::Tag::generated});
  m.def("zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("zero(Tensor self) -> Tensor", {at::Tag::generated});
  m.def("sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor", {at::Tag::generated});
  m.def("sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor", {at::Tag::generated});
  m.def("sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_to_dense.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_coalesced(Tensor self, bool coalesced) -> Tensor", {at::Tag::generated});
  m.def("copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor", {at::Tag::generated});
  m.def("to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
  m.def("quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
  m.def("q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
  m.def("_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
  m.def("_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))", {at::Tag::generated});
  m.def("_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)", {at::Tag::generated});
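  // NOTE: "_functional" variants such as _fused_moving_avg_obs_fq_helper_functional
  // above take the state tensors by value and return the updated values as
  // additional named outputs (running_min_out, running_max_out, scale_out,
  // zero_point_out) instead of mutating Tensor(a!) arguments, giving
  // functionalization a mutation-free schema to target.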
2785 m.def("_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2786 m.def("_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", {at::Tag::generated});
2787 m.def("lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()", {at::Tag::generated});
2788 m.def("_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
2789 m.def("_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
2790 m.def("_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
2791 m.def("_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", {at::Tag::generated});
2792 m.def("_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
2793 m.def("set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2794 m.def("set.source_Storage(Tensor self, Storage source) -> Tensor", {at::Tag::generated});
2795 m.def("set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2796 m.def("set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor", {at::Tag::generated});
2797 m.def("set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2798 m.def("set.source_Tensor(Tensor self, Tensor source) -> Tensor", {at::Tag::generated});
2799 m.def("set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2800 m.def("set(Tensor self) -> Tensor", {at::Tag::generated});
2801 m.def("lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2802 m.def("lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2803 m.def("masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2804 m.def("masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2805 m.def("masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2806 m.def("_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2807 m.def("_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2808 m.def("put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2809 m.def("index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2810 m.def("index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2811 m.def("bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2812 m.def("bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2813 m.def("bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2814 m.def("__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2815 m.def("__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2816 m.def("bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2817 m.def("__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2818 m.def("__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2819 m.def("bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2820 m.def("random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2821 m.def("random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2822 m.def("random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2823 m.def("random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2824 m.def("random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2825 m.def("random(Tensor self, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2826 m.def("uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2827 m.def("uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2828 m.def("cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2829 m.def("cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2830 m.def("log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2831 m.def("log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2832 m.def("exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2833 m.def("exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2834 m.def("geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2835 m.def("geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2836 m.def("tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2837 m.def("triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2838 m.def("trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2839 m.def("_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2840 m.def("dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2841 m.def("_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2842 m.def("_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2843 m.def("_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2844 m.def("remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2845 m.def("argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2846 m.def("unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2847 m.def("normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::nondeterministic_seeded});
2848 m.def("_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2849 m.def("_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)", {at::Tag::generated});
2850 m.def("_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2851 m.def("_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)", {at::Tag::generated});
2852 m.def("_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2853 m.def("_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2854 m.def("_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2855 m.def("_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2856 m.def("_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2857 m.def("_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2858 m.def("_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2859 m.def("_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2860 m.def("_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2861 m.def("_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2862 m.def("_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2863 m.def("_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2864 m.def("_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2865 m.def("_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2866 m.def("_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2867 m.def("_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2868 m.def("_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2869 m.def("_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2870 m.def("_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2871 m.def("_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2872 m.def("_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2873 m.def("_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2874 m.def("_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2875 m.def("_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2876 m.def("_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2877 m.def("_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2878 m.def("_foreach_zero(Tensor[] self) -> Tensor[] self_out", {at::Tag::generated});
2879 m.def("_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2880 m.def("_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2881 m.def("_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2882 m.def("_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2883 m.def("_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2884 m.def("_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2885 m.def("_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2886 m.def("_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2887 m.def("_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2888 m.def("_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2889 m.def("_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2890 m.def("_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2891 m.def("_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2892 m.def("_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2893 m.def("_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2894 m.def("_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2895 m.def("_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2896 m.def("_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2897 m.def("_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2898 m.def("_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2899 m.def("_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2900 m.def("_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2901 m.def("_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2902 m.def("_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2903 m.def("_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2904 m.def("_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2905 m.def("_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2906 m.def("_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2907 m.def("_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2908 m.def("_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2909 m.def("_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2910 m.def("_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2911 m.def("_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2912 m.def("_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2913 m.def("_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2914 m.def("_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2915 m.def("bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2916 m.def("searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2917 m.def("glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2918 m.def("glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2919 m.def("hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2920 m.def("rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2921 m.def("mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2922 m.def("_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2923 m.def("_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2924 m.def("_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2925 m.def("_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2926 m.def("_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
2927 m.def("conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2928 m.def("slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2929 m.def("slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2930 m.def("isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2931 m.def("linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2932 m.def("_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2933 m.def("_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2934 m.def("_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2935 m.def("_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2936 m.def("_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2937 m.def("_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2938 m.def("segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2939 m.def("_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2940 m.def("_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2941 m.def("_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2942 m.def("_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2943 m.def("view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2944 m.def("view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2945 m.def("_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2946 m.def("_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2947 m.def("as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2948 m.def("_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2949 m.def("diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2950 m.def("expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2951 m.def("permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2952 m.def("_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2953 m.def("select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2954 m.def("detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2955 m.def("slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2956 m.def("squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2957 m.def("squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2958 m.def("squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2959 m.def("t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2960 m.def("transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2961 m.def("unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2962 m.def("_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2963 m.def("_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2964 m.def("indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2965 m.def("values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2966 m.def("crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2967 m.def("col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2968 m.def("ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2969 m.def("row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2970 m.def("view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2971 m.def("view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2972 m.def("unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2973 m.def("alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated, at::Tag::view_copy});
2974 m.def("to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2975 m.def("_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2976 m.def("_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", {at::Tag::generated});
2977 m.def("_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2978 m.def("_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2979 m.def("_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", {at::Tag::generated});
2980 m.def("_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", {at::Tag::generated});
2981 m.def("_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)", {at::Tag::generated});
2982 m.def("_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2983 m.def("_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", {at::Tag::generated});
2984 m.def("_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", {at::Tag::generated});
2985 m.def("_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", {at::Tag::generated});;
2986 // Distributed Ops
2987 // Implementations located in torch/csrc/jit/runtime/register_distributed_ops.cpp
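 // get_gradients returns the Tensor -> gradient map accumulated for a distributed autograd context; it backs torch.distributed.autograd.get_gradients(context_id) in Python.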
2988 m.def("get_gradients(int context_id) -> Dict(Tensor, Tensor)");
2989}
2990
2991} // namespace at
2992