#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// @generated from ../tools/autograd/templates/python_variable_methods.cpp

#include <Python.h>

// Undefine the copysign macro so that at::copysign works as intended with MSVC
// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196
#ifdef _MSC_VER
#undef copysign
#endif // _MSC_VER

#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/Size.h"
#include "torch/csrc/autograd/generated/VariableType.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/autograd/utils/error_messages.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/jit/frontend/tracer.h"
#ifdef USE_CUDA
#include "torch/csrc/cuda/Event.h"
#endif
#include "torch/csrc/utils/cuda_lazy_init.h"
#include "torch/csrc/utils/object_ptr.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/python_numbers.h"
#include "torch/csrc/utils/python_strings.h"
#include "torch/csrc/utils/python_tuples.h"
#include "torch/csrc/utils/tensor_apply.h"
#include "torch/csrc/utils/tensor_list.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/utils/tensor_types.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/autograd/python_return_types.h"

#include <ATen/core/Tensor.h>
#include <ATen/FuncTorchTLS.h>
#include "c10/util/Optional.h"
#include "c10/core/Stream.h"

#include <stdexcept>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/__and__.h>
#include <ATen/ops/__iand__.h>
#include <ATen/ops/__ilshift__.h>
#include <ATen/ops/__ior__.h>
#include <ATen/ops/__irshift__.h>
#include <ATen/ops/__ixor__.h>
#include <ATen/ops/__lshift__.h>
#include <ATen/ops/__or__.h>
#include <ATen/ops/__rshift__.h>
#include <ATen/ops/__xor__.h>
#include <ATen/ops/_addmm_activation.h>
#include <ATen/ops/_autocast_to_full_precision.h>
#include <ATen/ops/_autocast_to_reduced_precision.h>
#include <ATen/ops/_coalesced.h>
#include <ATen/ops/_conj.h>
#include <ATen/ops/_conj_physical.h>
#include <ATen/ops/_dimI.h>
#include <ATen/ops/_dimV.h>
#include <ATen/ops/_indices.h>
#include <ATen/ops/_is_all_true.h>
#include <ATen/ops/_is_any_true.h>
#include <ATen/ops/_is_zerotensor.h>
#include <ATen/ops/_neg_view.h>
#include <ATen/ops/_nested_tensor_size.h>
#include <ATen/ops/_nested_tensor_strides.h>
#include <ATen/ops/_nnz.h>
#include <ATen/ops/_to_dense.h>
#include <ATen/ops/_values.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/add.h>
#include <ATen/ops/add.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/adjoint.h>
#include <ATen/ops/align_as.h>
#include <ATen/ops/align_to.h>
#include <ATen/ops/all.h>
#include <ATen/ops/allclose.h>
#include <ATen/ops/amax.h>
#include <ATen/ops/amin.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/angle.h>
#include <ATen/ops/any.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/argmax.h>
#include <ATen/ops/argmin.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/argwhere.h>
#include <ATen/ops/as_strided.h>
#include <ATen/ops/as_strided.h>
#include <ATen/ops/as_strided_scatter.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bincount.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bmm.h>
#include <ATen/ops/broadcast_to.h>
#include <ATen/ops/cauchy.h>
#include <ATen/ops/ccol_indices.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/chalf.h>
#include <ATen/ops/cholesky.h>
#include <ATen/ops/cholesky_inverse.h>
#include <ATen/ops/cholesky_solve.h>
#include <ATen/ops/chunk.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clone.h>
#include <ATen/ops/coalesce.h>
#include <ATen/ops/col_indices.h>
#include <ATen/ops/conj.h>
#include <ATen/ops/conj_physical.h>
#include <ATen/ops/conj_physical.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/corrcoef.h>
#include <ATen/ops/cos.h>
#include <ATen/ops/cos.h>
#include <ATen/ops/cosh.h>
#include <ATen/ops/cosh.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/cov.h>
#include <ATen/ops/cross.h>
#include <ATen/ops/crow_indices.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/dense_dim.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/det.h>
#include <ATen/ops/detach.h>
#include <ATen/ops/detach.h>
#include <ATen/ops/diag.h>
#include <ATen/ops/diag_embed.h>
#include <ATen/ops/diagflat.h>
#include <ATen/ops/diagonal.h>
#include <ATen/ops/diagonal_scatter.h>
#include <ATen/ops/diff.h>
#include <ATen/ops/digamma.h>
#include <ATen/ops/digamma.h>
#include <ATen/ops/dist.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/dot.h>
#include <ATen/ops/dsplit.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/equal.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/erfc.h>
#include <ATen/ops/erfc.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/exp2.h>
#include <ATen/ops/exp2.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/expand.h>
#include <ATen/ops/expand_as.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/exponential.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill_diagonal.h>
#include <ATen/ops/fix.h>
#include <ATen/ops/fix.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/flip.h>
#include <ATen/ops/fliplr.h>
#include <ATen/ops/flipud.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/fmax.h>
#include <ATen/ops/fmin.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/frexp.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/gcd.h>
#include <ATen/ops/gcd.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/geometric.h>
#include <ATen/ops/geqrf.h>
#include <ATen/ops/ger.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/hardshrink.h>
#include <ATen/ops/heaviside.h>
#include <ATen/ops/heaviside.h>
#include <ATen/ops/histc.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/hsplit.h>
#include <ATen/ops/hypot.h>
#include <ATen/ops/hypot.h>
#include <ATen/ops/i0.h>
#include <ATen/ops/i0.h>
#include <ATen/ops/igamma.h>
#include <ATen/ops/igamma.h>
#include <ATen/ops/igammac.h>
#include <ATen/ops/igammac.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/index_reduce.h>
#include <ATen/ops/index_reduce.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/indices.h>
#include <ATen/ops/inner.h>
#include <ATen/ops/int_repr.h>
#include <ATen/ops/inverse.h>
#include <ATen/ops/is_coalesced.h>
#include <ATen/ops/is_complex.h>
#include <ATen/ops/is_conj.h>
#include <ATen/ops/is_distributed.h>
#include <ATen/ops/is_floating_point.h>
#include <ATen/ops/is_inference.h>
#include <ATen/ops/is_neg.h>
#include <ATen/ops/is_nonzero.h>
#include <ATen/ops/is_pinned.h>
#include <ATen/ops/is_same_size.h>
#include <ATen/ops/is_set_to.h>
#include <ATen/ops/is_signed.h>
#include <ATen/ops/isclose.h>
#include <ATen/ops/isfinite.h>
#include <ATen/ops/isinf.h>
#include <ATen/ops/isnan.h>
#include <ATen/ops/isneginf.h>
#include <ATen/ops/isposinf.h>
#include <ATen/ops/isreal.h>
#include <ATen/ops/istft.h>
#include <ATen/ops/kron.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/lcm.h>
#include <ATen/ops/lcm.h>
#include <ATen/ops/ldexp.h>
#include <ATen/ops/ldexp.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/lgamma.h>
#include <ATen/ops/lgamma.h>
#include <ATen/ops/log.h>
#include <ATen/ops/log10.h>
#include <ATen/ops/log10.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/log2.h>
#include <ATen/ops/log2.h>
#include <ATen/ops/log.h>
#include <ATen/ops/log_normal.h>
#include <ATen/ops/log_softmax.h>
#include <ATen/ops/logaddexp.h>
#include <ATen/ops/logaddexp2.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/logdet.h>
#include <ATen/ops/logical_and.h>
#include <ATen/ops/logical_and.h>
#include <ATen/ops/logical_not.h>
#include <ATen/ops/logical_not.h>
#include <ATen/ops/logical_or.h>
#include <ATen/ops/logical_or.h>
#include <ATen/ops/logical_xor.h>
#include <ATen/ops/logical_xor.h>
#include <ATen/ops/logit.h>
#include <ATen/ops/logit.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lu_solve.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_scatter.h>
#include <ATen/ops/masked_scatter.h>
#include <ATen/ops/masked_select.h>
#include <ATen/ops/matmul.h>
#include <ATen/ops/matrix_exp.h>
#include <ATen/ops/matrix_power.h>
#include <ATen/ops/max.h>
#include <ATen/ops/maximum.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/median.h>
#include <ATen/ops/min.h>
#include <ATen/ops/minimum.h>
#include <ATen/ops/mm.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/moveaxis.h>
#include <ATen/ops/movedim.h>
#include <ATen/ops/msort.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/multinomial.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/mv.h>
#include <ATen/ops/mvlgamma.h>
#include <ATen/ops/mvlgamma.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/nanmean.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/nansum.h>
#include <ATen/ops/narrow.h>
#include <ATen/ops/narrow_copy.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/neg.h>
#include <ATen/ops/neg.h>
#include <ATen/ops/negative.h>
#include <ATen/ops/negative.h>
#include <ATen/ops/new_empty.h>
#include <ATen/ops/new_empty_strided.h>
#include <ATen/ops/new_full.h>
#include <ATen/ops/new_ones.h>
#include <ATen/ops/new_zeros.h>
#include <ATen/ops/nextafter.h>
#include <ATen/ops/nextafter.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/orgqr.h>
#include <ATen/ops/ormqr.h>
#include <ATen/ops/outer.h>
#include <ATen/ops/permute.h>
#include <ATen/ops/pin_memory.h>
#include <ATen/ops/pinverse.h>
#include <ATen/ops/polygamma.h>
#include <ATen/ops/polygamma.h>
#include <ATen/ops/positive.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/prelu.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/put.h>
#include <ATen/ops/put.h>
#include <ATen/ops/q_per_channel_axis.h>
#include <ATen/ops/q_per_channel_scales.h>
#include <ATen/ops/q_per_channel_zero_points.h>
#include <ATen/ops/q_scale.h>
#include <ATen/ops/q_zero_point.h>
#include <ATen/ops/qr.h>
#include <ATen/ops/qscheme.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/random.h>
#include <ATen/ops/ravel.h>
#include <ATen/ops/reciprocal.h>
#include <ATen/ops/reciprocal.h>
#include <ATen/ops/record_stream.h>
#include <ATen/ops/refine_names.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/rename.h>
#include <ATen/ops/rename.h>
#include <ATen/ops/renorm.h>
#include <ATen/ops/renorm.h>
#include <ATen/ops/repeat.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/reshape.h>
#include <ATen/ops/reshape_as.h>
#include <ATen/ops/resize.h>
#include <ATen/ops/resize_as.h>
#include <ATen/ops/resize_as_sparse.h>
#include <ATen/ops/resolve_conj.h>
#include <ATen/ops/resolve_neg.h>
#include <ATen/ops/retain_grad.h>
#include <ATen/ops/roll.h>
#include <ATen/ops/rot90.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/row_indices.h>
#include <ATen/ops/rsqrt.h>
#include <ATen/ops/rsqrt.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_reduce.h>
#include <ATen/ops/scatter_reduce.h>
#include <ATen/ops/select.h>
#include <ATen/ops/select_scatter.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/sigmoid.h>
#include <ATen/ops/sigmoid.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/signbit.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/sinc.h>
#include <ATen/ops/sinc.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/slice_scatter.h>
#include <ATen/ops/slogdet.h>
#include <ATen/ops/smm.h>
#include <ATen/ops/softmax.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sparse_dim.h>
#include <ATen/ops/sparse_mask.h>
#include <ATen/ops/sparse_resize.h>
#include <ATen/ops/sparse_resize_and_clear.h>
#include <ATen/ops/split.h>
#include <ATen/ops/split_with_sizes.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/square.h>
#include <ATen/ops/square.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/sspaddmm.h>
#include <ATen/ops/std.h>
#include <ATen/ops/stft.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum_to_size.h>
#include <ATen/ops/svd.h>
#include <ATen/ops/swapaxes.h>
#include <ATen/ops/swapaxes.h>
#include <ATen/ops/swapdims.h>
#include <ATen/ops/swapdims.h>
#include <ATen/ops/t.h>
#include <ATen/ops/t.h>
#include <ATen/ops/take.h>
#include <ATen/ops/take_along_dim.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tensor_split.h>
#include <ATen/ops/tile.h>
#include <ATen/ops/to_dense.h>
#include <ATen/ops/to_mkldnn.h>
#include <ATen/ops/to_padded_tensor.h>
#include <ATen/ops/to_sparse.h>
#include <ATen/ops/to_sparse_bsc.h>
#include <ATen/ops/to_sparse_bsr.h>
#include <ATen/ops/to_sparse_csc.h>
#include <ATen/ops/to_sparse_csr.h>
#include <ATen/ops/topk.h>
#include <ATen/ops/trace.h>
#include <ATen/ops/transpose.h>
#include <ATen/ops/transpose.h>
#include <ATen/ops/triangular_solve.h>
#include <ATen/ops/tril.h>
#include <ATen/ops/tril.h>
#include <ATen/ops/triu.h>
#include <ATen/ops/triu.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/type_as.h>
#include <ATen/ops/unbind.h>
#include <ATen/ops/unflatten.h>
#include <ATen/ops/unfold.h>
#include <ATen/ops/uniform.h>
#include <ATen/ops/unsafe_chunk.h>
#include <ATen/ops/unsafe_split.h>
#include <ATen/ops/unsafe_split_with_sizes.h>
#include <ATen/ops/unsqueeze.h>
#include <ATen/ops/unsqueeze.h>
#include <ATen/ops/values.h>
#include <ATen/ops/var.h>
#include <ATen/ops/vdot.h>
#include <ATen/ops/view.h>
#include <ATen/ops/view_as.h>
#include <ATen/ops/vsplit.h>
#include <ATen/ops/where.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/zero.h>
#include <ATen/ops/_local_scalar_dense.h>
#endif

using at::DeviceGuard;
using at::device_of;
using at::OptionalDeviceGuard;
using at::Backend;
using at::Scalar;
using at::ScalarType;
using at::Tensor;
using c10::Stream;
using namespace torch::autograd::utils;

namespace torch { namespace autograd {

static PyObject * THPVariable__is_view(PyObject *self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "_is_view", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  if (self_.is_view()) {
    Py_RETURN_TRUE;
  } else {
    Py_RETURN_FALSE;
  }
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because native_functions.yaml has no support for first-class functions
// See: ATen/native/README.md for more context
static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    auto args = py::make_tuple(py::handle(arg));
    return handle_torch_function(self, "apply_", args.ptr());
  }
  auto& self_ = THPVariable_Unpack(self);
  if (self_.requires_grad()) {
    throw std::runtime_error(
        "Can't call apply_() on Variable that requires grad. Use "
        "var.detach().apply_() instead.");
  }
  return THPVariable_Wrap(torch::utils::apply_(self_, arg));
  END_HANDLE_TH_ERRORS
}
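
// Illustrative Python-side usage (a sketch for context, not part of the
// bindings themselves): apply_() runs the callable eagerly on every element,
// which is why it is only allowed on tensors detached from autograd, e.g.
//   t = torch.arange(3.).detach(); t.apply_(lambda x: x * 2)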

static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "size(int64_t dim)",
    "size()",
    "size(Dimname dim)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  if (r.idx == 0) {
    if (jit::tracer::isTracing()) {
      // will error out if a tensor has symints
      return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
    } else {
      return torch::toPyObject(self_.sym_size(r.toInt64(0)));
    }
  } else if (r.idx == 1) {
    return THPSize_NewFromSymSizes(self_);
  } else if (r.idx == 2) {
    if (jit::tracer::isTracing()) {
      TORCH_INTERNAL_ASSERT(false, "NYI: Named tensors w/ JIT");
    }
    return wrap(self_.size(r.dimname(0)));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
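
// Note on the overloads above: size() with no argument returns a torch.Size,
// while size(dim) and size(name) return a single (possibly symbolic) integer.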

static PyObject * THPVariable_stride(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "stride(int64_t dim)",
    "stride()",
    "stride(Dimname dim)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.idx == 0) {
    return torch::toPyObject(self_.sym_stride(r.toInt64(0)));
  } else if (r.idx == 1) {
    // yes, this is called strides in ATen.
    at::SymIntArrayRef strides = self_.sym_strides();
    // we can't do the normal wrapping here because IntArrayRef maps to both
    // torch.Size and tuple in python
    // TODO: consider factoring this out
    THPObjectPtr tuple(PyTuple_New(strides.size()));
    if (!tuple) throw python_error();
    for (size_t i = 0; i != strides.size(); i++) {
      PyObject* s = torch::toPyObject(strides[i]);
      if (!s) throw python_error();
      PyTuple_SET_ITEM(tuple.get(), i, s);
    }
    return tuple.release();
  } else if (r.idx == 2) {
    return wrap(self_.stride(r.dimname(0)));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "get_device", args, nullptr);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.get_device());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_has_names(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "has_names", args);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.has_names());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_data_ptr(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "data_ptr", args);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.data_ptr());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_storage_offset(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "storage_offset");
  }
  auto& self = THPVariable_Unpack(self_);
  return py::cast(self.sym_storage_offset()).release().ptr();
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_dim(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "dim", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  return THPUtils_packInt64(self_.dim());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_numel(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "numel", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  if (jit::tracer::isTracing()) {
    return wrap(jit::tracer::getNumelOf(self_));
  } else {
    return py::cast(self_.sym_numel()).release().ptr();
  }
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_contiguous(const Tensor & self, at::MemoryFormat memory_format) {
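  // Pattern shared by the dispatch_* helpers in this file: release the GIL so
  // other Python threads can run during the ATen call, and make self's device
  // current so the operation executes on the right device.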
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.contiguous(memory_format);
}

static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "contiguous(*, MemoryFormat memory_format=contiguous_format)",
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto& self_ = THPVariable_Unpack(self);
  auto memory_format = r.memoryformat(0);
  // avoids touching the GIL or current device if self is already contiguous
  if (self_.is_contiguous(memory_format)) {
    // NOTE: this logic is duplicated from VariableType.cpp. Since we need to
    // record this call to contiguous() in the trace regardless of whether
    // we actually call contiguous here, we need to record this information
    // manually.
    if (jit::tracer::isTracing()) {
      auto tracer_state = jit::tracer::getTracingState();
      auto op_name = c10::Symbol::fromQualString("aten::contiguous");
      auto node = tracer_state->createNode(op_name, /*num_outputs=*/0);
      jit::tracer::recordSourceLocation(node);
      jit::tracer::addInputs(node, "self", self_);
      jit::tracer::addInputs(node, "memory_format", memory_format);
      tracer_state->insertNode(node);
      jit::tracer::addOutput(node, self_);
    }
    Py_INCREF(self);
    return self;
  }
  return THPVariable_Wrap(dispatch_contiguous(self_, memory_format));
  END_HANDLE_TH_ERRORS
}
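
// Illustrative Python-side usage (an assumption, for context):
//   y = x.contiguous(memory_format=torch.channels_last)
// returns x itself (with the call still recorded when tracing) whenever x is
// already contiguous in that layout.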

static Tensor dispatch_copy_(const Tensor & self, const Tensor & other, bool non_blocking) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.copy_(other, non_blocking);
}

static PyObject * THPVariable_copy_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "copy_(Tensor other, bool non_blocking=False)",
    "copy_(Tensor other, bool async=False)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  return THPVariable_Wrap(dispatch_copy_(self_, r.tensor(0), r.toBool(1)));
  END_HANDLE_TH_ERRORS
}

static double dispatch_to_CDouble(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.sym_numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<double>();
}

static c10::complex<double> dispatch_to_CComplexDouble(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.sym_numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<c10::complex<double>>();
}

static int64_t dispatch_to_CLong(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.sym_numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<int64_t>();
}

static PyObject * THPVariable_float_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__float__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python float", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  return wrap(dispatch_to_CDouble(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_complex_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__complex__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python complex", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  return wrap(dispatch_to_CComplexDouble(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_integral_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__int__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python integer", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  if (isFloatingType(self_.scalar_type())) {
    // we can't dispatch to item<int64_t> here because we want to avoid ATen overflow checks;
    // the python integral type (long in python2) can't overflow.
    return THPUtils_packDoubleAsInt(dispatch_to_CDouble(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}
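
// For floating-point tensors, the double-then-pack path above mirrors
// Python's int() truncation toward zero, e.g. int(torch.tensor(3.7)) == 3,
// while skipping ATen's overflow checks.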

// This is the __index__ function in Python which is similar to __int__, but
// called when used as a slice.
static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__index__", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  // TODO: change the condition to `self_.dim() != 0` once we expose scalars
  // in PyTorch.
  if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true) || self_.sym_numel() != 1) {
    throw TypeError("only integer tensors of a single element can be converted to an index");
  }
  return wrap(dispatch_to_CLong(self_));
  END_HANDLE_TH_ERRORS
}
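
// For context: Python invokes __index__ whenever a tensor is used where an
// int is required, e.g. as a list index or slice bound such as
// lst[torch.tensor(1)].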

static Tensor dispatch_invert(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.bitwise_not();
}

static PyObject * THPVariable_invert(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__invert__", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true)) {
    throw TypeError("~ (operator.invert) is only implemented on integer and Boolean-type tensors");
  }
  return THPVariable_Wrap(dispatch_invert(self_));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // NOTE: this is where we record aten::to in the graph during tracing. However, the behavior of aten::to
  // is different with respect to TensorOptions fields that are not present: aten::to inherits fields that
  // are missing from the self argument while the tracer assumes that they should be populated with the
  // default values (eg. float for scalar type). By explicitly copying over the tensor options here we fully
  // specify all tensor options and thus record the proper trace
  return self.to(self.options().device(device).memory_format(optional_memory_format), non_blocking, copy);
}
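// Consequence of the NOTE above: tracing `t.to("cuda")` records t's dtype and
// other options explicitly, so the trace replays with the same semantics as
// the eager call instead of falling back to default options.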

static Tensor dispatch_to(const Tensor & self, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  return self.to(self.options().memory_format(optional_memory_format), non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // TODO: Make this call the TensorOptions version, maybe?
  return self.to(dtype, non_blocking, copy, optional_memory_format);
}

static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // TODO: Make this call the TensorOptions version, maybe?
  return self.to(device, dtype, non_blocking, copy, optional_memory_format);
}

static PyObject * THPVariable_cpu(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cpu(*, MemoryFormat? memory_format=None)"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_Wrap(dispatch_to(self_, at::Device(at::DeviceType::CPU), false, false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_nonzero(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.nonzero();
}

static std::vector<Tensor> dispatch_nonzero_numpy(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.nonzero_numpy();
}

static PyObject * THPVariable_nonzero(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "nonzero()",
    "nonzero(*, bool as_tuple)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.idx == 0 || (r.idx == 1 && !r.toBool(0))) {
    return wrap(dispatch_nonzero(self_));
  } else {
    return wrap(dispatch_nonzero_numpy(self_));
  }
  END_HANDLE_TH_ERRORS
}
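
// For context: nonzero() returns a 2-D tensor with one row of indices per
// non-zero element, while nonzero(as_tuple=True) returns a tuple of 1-D index
// tensors (one per dimension), matching numpy.nonzero.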

static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cuda(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "cuda(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::CUDA) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_cuda(), "Invalid device, must be cuda device");
  torch::utils::cuda_lazy_init();
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_xpu(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "xpu(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "xpu(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::XPU) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_xpu(), "Invalid device, must be xpu device");
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_ipu(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "ipu(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "ipu(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::IPU) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_ipu(), "Invalid device, must be ipu device");
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType, c10::optional<c10::MemoryFormat> optional_memory_format) {
  HANDLE_TH_ERRORS
  auto& self_ = THPVariable_Unpack(self);
  return THPVariable_Wrap(dispatch_to(self_, scalarType, false, false, optional_memory_format));
  END_HANDLE_TH_ERRORS
}
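
// The byte()/char()/double()/float()/cdouble()/cfloat()/half()/int()/long()/
// short()/bool()/bfloat16() methods below are thin wrappers that all funnel
// through THPVariable_to_type with the corresponding ScalarType.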

static PyObject * THPVariable_byte(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "byte(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Byte, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_char(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "char(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Char, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_double(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "double(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Double, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_float(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "float(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Float, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cdouble(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cdouble(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::ComplexDouble, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cfloat(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cfloat(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::ComplexFloat, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_half(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "half(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Half, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_int(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "int(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Int, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_long(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "long(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Long, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_short(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "short(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Short, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_bool(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "bool(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Bool, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_bfloat16(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "bfloat16(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::BFloat16, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_element_size(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "element_size", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  return THPUtils_packInt64(self_.element_size());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because PyObject* is not declarable in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_numpy(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "numpy(*, bool force=False)"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  jit::tracer::warn("Converting a tensor to a NumPy array", jit::tracer::WARN_PYTHON_DATAFLOW);
  return torch::utils::tensor_to_numpy(self_, r.toBool(0));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "requires_grad_(bool requires_grad=True)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  // temporary hack to improve functorch UX.
  const auto& functorch_tls = at::functorch::functorchTLSAccessor();
  if (functorch_tls) {
    functorch_tls->checkSupportsInplaceRequiresGrad();
  }

  auto requires_grad = r.toBool(0);
  // should we throw if requires_grad is true? var.requires_grad = True throws here
  // but it's nice to let this be a no-op.
  if (!self_.is_leaf() && !requires_grad) {
    throw std::runtime_error(autograd::utils::requires_grad_leaf_error(requires_grad));
  }
  if (requires_grad && !isDifferentiableType(at::typeMetaToScalarType(self_.dtype()))) {
    throw std::runtime_error("only Tensors of floating point dtype can require gradients");
  }
  self_.set_requires_grad(requires_grad);
  return THPVariable_Wrap(self_);
  END_HANDLE_TH_ERRORS
}
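
// Illustrative Python-side usage: requires_grad_() mutates a leaf tensor in
// place and returns it, so it chains inside expressions, e.g.
//   w = torch.zeros(3).requires_grad_()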

inline bool dispatch_is_contiguous(const Tensor & self, MemoryFormat memory_format) {
  return self.is_contiguous(memory_format);
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_is_contiguous(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "is_contiguous(*, MemoryFormat memory_format=contiguous_format)",
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self_, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self_, args, kwargs, PyObject_Type(self_), "torch.Tensor");
  }

  auto memory_format = r.memoryformat(0);
  auto& self = THPVariable_Unpack(self_);
  return wrap(dispatch_is_contiguous(self, memory_format));
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_item(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "item", args);
  }
  jit::tracer::warn("Converting a tensor to a Python number", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  return py::cast(self_.item()).release().ptr();
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because native_functions.yaml has no support for first-class functions
// See: ATen/native/README.md for more context
static PyObject * THPVariable_map_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map_(Tensor other, PyObject* callable)" });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  Variable other = r.tensor(0);
  if (self_.requires_grad() || other.requires_grad()) {
    throw std::runtime_error(
        "Can't call map_() on Variable that requires grad. Use "
        "var.detach().map_() instead.");
  }
  TORCH_CHECK(
      !self_.unsafeGetTensorImpl()->is_python_dispatch() && !other.unsafeGetTensorImpl()->is_python_dispatch(),
      ".map_ is not supported for tensor subclasses.");

  return THPVariable_Wrap(torch::utils::map_(self_, other, r.pyobject(1)));
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because native_functions.yaml has no support for first-class functions
// See: ATen/native/README.md for more context
static PyObject * THPVariable_map2_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map2_(Tensor x, Tensor y, PyObject* callable)" });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  Variable x = r.tensor(0);
  Variable y = r.tensor(1);
  if (self_.requires_grad() || x.requires_grad() || y.requires_grad()) {
    throw std::runtime_error(
        "Can't call map2_() on Variable that requires grad. Use "
        "var.detach().map2_() instead.");
  }
  TORCH_CHECK(
      !x.unsafeGetTensorImpl()->is_python_dispatch() && !y.unsafeGetTensorImpl()->is_python_dispatch(),
      ".map2_ is not supported for tensor subclasses.");
  return THPVariable_Wrap(torch::utils::map2_(self_, x, y, r.pyobject(2)));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "new", args, kwargs);
  }
  auto& self_ = THPVariable_Unpack(self);
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::legacy_tensor_new(legacyExtractDispatchKey(self_), self_.scalar_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "new_tensor", args, kwargs);
  }
  auto& self_ = THPVariable_Unpack(self);
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_tensor(legacyExtractDispatchKey(self_), self_.scalar_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_storage(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "untyped_storage");
  }
  auto& self_ = THPVariable_Unpack(self);
  return createPyObject(self_.storage());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "to(Device device=None, ScalarType dtype=None, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
    "to(ScalarType dtype, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
    "to(Tensor tensor, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
  });
  ParsedArgs<5> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);
  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  auto parsed = parse_to_conversion(r, /*allow_copy*/ true);
  auto& device = std::get<0>(parsed);
  auto& scalarType = std::get<1>(parsed);
  auto non_blocking = std::get<2>(parsed);
  auto copy = std::get<3>(parsed);
  auto opt_memory_format = std::get<4>(parsed);
  auto& self_ = THPVariable_Unpack(self);
  if (device && device->is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
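  // Fast path: if nothing about the tensor would change, return self directly
  // (Tensor.to is documented to return self in that case).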
  if (!device && !scalarType && !copy && !opt_memory_format.has_value()) {
    Py_INCREF(self);
    return self;
  } else if (!device && !scalarType) {
    return THPVariable_Wrap(
        dispatch_to(self_, non_blocking, copy, opt_memory_format));
  } else if (!device) {
    return THPVariable_Wrap(dispatch_to(self_, *scalarType, non_blocking, copy, opt_memory_format));
  } else if (!scalarType) {
    return THPVariable_Wrap(dispatch_to(self_, *device, non_blocking, copy, opt_memory_format));
  } else {
    return THPVariable_Wrap(dispatch_to(self_, *device, *scalarType, non_blocking, copy, opt_memory_format));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because an arbitrarily nested list is not declarable in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_tolist(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "tolist", args);
  }
  jit::tracer::warn("Converting a tensor to a Python list", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto self_ = THPVariable_Unpack(self);
  return torch::utils::tensor_to_list(self_);
  END_HANDLE_TH_ERRORS
}
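
// Illustrative Python-side usage: torch.eye(2).tolist() == [[1.0, 0.0], [0.0, 1.0]];
// the result nests to the tensor's dimensionality, which is exactly what
// cannot be expressed in native_functions.yaml.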

static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "type(PyObject* dtype=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "type(PyObject* dtype=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.isNone(0)) {
    return THPUtils_packString(torch::utils::options_to_string(self_.options()));
  }
  auto obj = r.pyobject(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  std::string type_name;
  bool is_dtype = false;
  if (PyType_Check(obj)) {
    if (obj == THPVariableClass) {
      type_name = "torch.Tensor";
    } else {
      type_name = ((PyTypeObject*)obj)->tp_name;
    }
  } else if (THPUtils_checkString(obj)) {
    type_name = THPUtils_unpackString(obj);
  } else if (THPDtype_Check(obj)) {
    is_dtype = true;
  } else {
    throw TypeError("dtype must be a type, str, or dtype object");
  }
  ScalarType scalar_type;
  Device device = self_.device();
  if (is_dtype) {
    scalar_type = r.scalartype(0);
  } else {
    at::TensorOptions options = torch::utils::options_from_string(type_name);
    scalar_type = at::typeMetaToScalarType(options.dtype());
    auto device_type = options.device().type();
    if (device_type != device.type()) {
      device = at::Device(device_type);
    }
  }
  if (device.is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
  return THPVariable_Wrap(dispatch_to(self_, device, scalar_type, /*non_blocking=*/ r.toBool(1), /*copy=*/ false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}
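// NOTE: type() has two modes. With no argument it returns the legacy type
// string (e.g. "torch.FloatTensor"); with a type object, a type string, or a
// dtype it converts, switching device type when the target string implies one
// (e.g. "torch.cuda.FloatTensor") and lazily initializing CUDA beforehand.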

// generated methods start here

\
// __and__
static PyObject * THPVariable___and__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__and__(Tensor other)",
    "__and__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch___and__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__and__(other);
      };
      return wrap(dispatch___and__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch___and__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__and__(other);
      };
      return wrap(dispatch___and__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __iand__
static PyObject * THPVariable___iand__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__iand__(Tensor other)",
    "__iand__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch___iand__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__iand__(other);
      };
      return wrap(dispatch___iand__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch___iand__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__iand__(other);
      };
      return wrap(dispatch___iand__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __ilshift__
static PyObject * THPVariable___ilshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__ilshift__(Tensor other)",
    "__ilshift__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch___ilshift__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__ilshift__(other);
      };
      return wrap(dispatch___ilshift__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch___ilshift__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__ilshift__(other);
      };
      return wrap(dispatch___ilshift__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __ior__
static PyObject * THPVariable___ior__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__ior__(Tensor other)",
    "__ior__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch___ior__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__ior__(other);
      };
      return wrap(dispatch___ior__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch___ior__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__ior__(other);
      };
      return wrap(dispatch___ior__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __irshift__
static PyObject * THPVariable___irshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__irshift__(Tensor other)",
    "__irshift__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch___irshift__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__irshift__(other);
      };
      return wrap(dispatch___irshift__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch___irshift__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__irshift__(other);
      };
      return wrap(dispatch___irshift__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __ixor__
static PyObject * THPVariable___ixor__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__ixor__(Tensor other)",
    "__ixor__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch___ixor__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__ixor__(other);
      };
      return wrap(dispatch___ixor__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch___ixor__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__ixor__(other);
      };
      return wrap(dispatch___ixor__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __lshift__
static PyObject * THPVariable___lshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__lshift__(Tensor other)",
    "__lshift__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch___lshift__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__lshift__(other);
      };
      return wrap(dispatch___lshift__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch___lshift__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__lshift__(other);
      };
      return wrap(dispatch___lshift__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __or__
static PyObject * THPVariable___or__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__or__(Tensor other)",
    "__or__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch___or__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__or__(other);
      };
      return wrap(dispatch___or__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch___or__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__or__(other);
      };
      return wrap(dispatch___or__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __rshift__
static PyObject * THPVariable___rshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__rshift__(Tensor other)",
    "__rshift__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch___rshift__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__rshift__(other);
      };
      return wrap(dispatch___rshift__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch___rshift__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__rshift__(other);
      };
      return wrap(dispatch___rshift__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// __xor__
static PyObject * THPVariable___xor__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "__xor__(Tensor other)",
    "__xor__(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch___xor__ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__xor__(other);
      };
      return wrap(dispatch___xor__(self, _r.tensor(0)));
    }
    case 1: {
      // aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch___xor__ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.__xor__(other);
      };
      return wrap(dispatch___xor__(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// _addmm_activation
static PyObject * THPVariable__addmm_activation(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "_addmm_activation(Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False)",
  }, /*traceable=*/true);

  ParsedArgs<5> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor

  auto dispatch__addmm_activation = [](const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._addmm_activation(mat1, mat2, beta, alpha, use_gelu);
  };
  return wrap(dispatch__addmm_activation(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3), _r.toBool(4)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
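// NOTE: _addmm_activation fuses out = act(beta * self + alpha * (mat1 @ mat2)),
// where act is GELU when use_gelu is true and ReLU otherwise (an inference
// from the operator's name and flag, not spelled out in this file).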

// _autocast_to_full_precision
static PyObject * THPVariable__autocast_to_full_precision(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "_autocast_to_full_precision(bool cuda_enabled, bool cpu_enabled)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)

  auto dispatch__autocast_to_full_precision = [](const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._autocast_to_full_precision(cuda_enabled, cpu_enabled);
  };
  return wrap(dispatch__autocast_to_full_precision(self, _r.toBool(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// _autocast_to_reduced_precision
static PyObject * THPVariable__autocast_to_reduced_precision(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "_autocast_to_reduced_precision(bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)

  auto dispatch__autocast_to_reduced_precision = [](const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._autocast_to_reduced_precision(cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
  };
  return wrap(dispatch__autocast_to_reduced_precision(self, _r.toBool(0), _r.toBool(1), _r.scalartype(2), _r.scalartype(3)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
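// NOTE (assumption, not stated in this file): this pair supports autocast.
// _autocast_to_reduced_precision downcasts floating-point tensors to the given
// cuda_dtype/cpu_dtype (typically float16 or bfloat16) when the matching
// backend flag is enabled, and _autocast_to_full_precision restores full
// float precision; the Tensor(a) schema means either may return self
// unchanged rather than a copy when no cast is needed.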

// _coalesced_
static PyObject * THPVariable__coalesced_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "_coalesced_(bool coalesced)",
  }, /*traceable=*/false);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)

  auto dispatch__coalesced_ = [](const at::Tensor & self, bool coalesced) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._coalesced_(coalesced);
  };
  return wrap(dispatch__coalesced_(self, _r.toBool(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
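// NOTE: _coalesced_ is a sparse-tensor method; per the Tensor(a!) schema it
// mutates self in place, and it only sets the tensor's coalesced flag rather
// than reordering or deduplicating any indices.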

// _conj
static PyObject * THPVariable__conj(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_conj");
  }
  // aten::_conj(Tensor(a) self) -> Tensor(a)

  auto dispatch__conj = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._conj();
  };
  return wrap(dispatch__conj(self));
  END_HANDLE_TH_ERRORS
}

// _conj_physical
static PyObject * THPVariable__conj_physical(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_conj_physical");
  }
  // aten::_conj_physical(Tensor self) -> Tensor

  auto dispatch__conj_physical = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._conj_physical();
  };
  return wrap(dispatch__conj_physical(self));
  END_HANDLE_TH_ERRORS
}

// _dimI
static PyObject * THPVariable__dimI(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_dimI");
  }
  // aten::_dimI(Tensor self) -> int

  auto dispatch__dimI = [](const at::Tensor & self) -> int64_t {
    pybind11::gil_scoped_release no_gil;
    return self._dimI();
  };
  return wrap(dispatch__dimI(self));
  END_HANDLE_TH_ERRORS
}

// _dimV
static PyObject * THPVariable__dimV(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_dimV");
  }
  // aten::_dimV(Tensor self) -> int

  auto dispatch__dimV = [](const at::Tensor & self) -> int64_t {
    pybind11::gil_scoped_release no_gil;
    return self._dimV();
  };
  return wrap(dispatch__dimV(self));
  END_HANDLE_TH_ERRORS
}

// _indices
static PyObject * THPVariable__indices(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_indices");
  }
  // aten::_indices(Tensor(a) self) -> Tensor(a)

  auto dispatch__indices = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._indices();
  };
  return wrap(dispatch__indices(self));
  END_HANDLE_TH_ERRORS
}

// _is_all_true
static PyObject * THPVariable__is_all_true(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_is_all_true");
  }
  // aten::_is_all_true(Tensor self) -> Tensor

  auto dispatch__is_all_true = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._is_all_true();
  };
  return wrap(dispatch__is_all_true(self));
  END_HANDLE_TH_ERRORS
}

// _is_any_true
static PyObject * THPVariable__is_any_true(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_is_any_true");
  }
  // aten::_is_any_true(Tensor self) -> Tensor

  auto dispatch__is_any_true = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._is_any_true();
  };
  return wrap(dispatch__is_any_true(self));
  END_HANDLE_TH_ERRORS
}

// _is_zerotensor
static PyObject * THPVariable__is_zerotensor(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_is_zerotensor");
  }
  // aten::_is_zerotensor(Tensor self) -> bool

  auto dispatch__is_zerotensor = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self._is_zerotensor();
  };
  return wrap(dispatch__is_zerotensor(self));
  END_HANDLE_TH_ERRORS
}

// _neg_view
static PyObject * THPVariable__neg_view(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_neg_view");
  }
  // aten::_neg_view(Tensor(a) self) -> Tensor(a)

  auto dispatch__neg_view = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._neg_view();
  };
  return wrap(dispatch__neg_view(self));
  END_HANDLE_TH_ERRORS
}

// _nested_tensor_size
static PyObject * THPVariable__nested_tensor_size(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_nested_tensor_size");
  }
  // aten::_nested_tensor_size(Tensor self) -> Tensor

  auto dispatch__nested_tensor_size = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._nested_tensor_size();
  };
  return wrap(dispatch__nested_tensor_size(self));
  END_HANDLE_TH_ERRORS
}

// _nested_tensor_strides
static PyObject * THPVariable__nested_tensor_strides(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_nested_tensor_strides");
  }
  // aten::_nested_tensor_strides(Tensor self) -> Tensor

  auto dispatch__nested_tensor_strides = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._nested_tensor_strides();
  };
  return wrap(dispatch__nested_tensor_strides(self));
  END_HANDLE_TH_ERRORS
}

// _nnz
static PyObject * THPVariable__nnz(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_nnz");
  }
  // aten::_nnz(Tensor self) -> int

  auto dispatch__nnz = [](const at::Tensor & self) -> int64_t {
    pybind11::gil_scoped_release no_gil;
    return self._nnz();
  };
  return wrap(dispatch__nnz(self));
  END_HANDLE_TH_ERRORS
}

// _to_dense
static PyObject * THPVariable__to_dense(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "_to_dense(ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor

  auto dispatch__to_dense = [](const at::Tensor & self, c10::optional<at::ScalarType> dtype) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._to_dense(dtype);
  };
  return wrap(dispatch__to_dense(self, _r.scalartypeOptional(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// _values
static PyObject * THPVariable__values(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "_values");
  }
  // aten::_values(Tensor(a) self) -> Tensor(a)

  auto dispatch__values = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self._values();
  };
  return wrap(dispatch__values(self));
  END_HANDLE_TH_ERRORS
}

// abs
static PyObject * THPVariable_abs(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "abs");
  }
  // aten::abs(Tensor self) -> Tensor

  auto dispatch_abs = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.abs();
  };
  return wrap(dispatch_abs(self));
  END_HANDLE_TH_ERRORS
}

// abs_
static PyObject * THPVariable_abs_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "abs_");
  }
  // aten::abs_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_abs_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.abs_();
  };
  return wrap(dispatch_abs_(self));
  END_HANDLE_TH_ERRORS
}

// absolute
static PyObject * THPVariable_absolute(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "absolute");
  }
  // aten::absolute(Tensor self) -> Tensor

  auto dispatch_absolute = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.absolute();
  };
  return wrap(dispatch_absolute(self));
  END_HANDLE_TH_ERRORS
}

// absolute_
static PyObject * THPVariable_absolute_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "absolute_");
  }
  // aten::absolute_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_absolute_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.absolute_();
  };
  return wrap(dispatch_absolute_(self));
  END_HANDLE_TH_ERRORS
}

// acos
static PyObject * THPVariable_acos(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "acos");
  }
  // aten::acos(Tensor self) -> Tensor

  auto dispatch_acos = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.acos();
  };
  return wrap(dispatch_acos(self));
  END_HANDLE_TH_ERRORS
}

// acos_
static PyObject * THPVariable_acos_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "acos_");
  }
  // aten::acos_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_acos_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.acos_();
  };
  return wrap(dispatch_acos_(self));
  END_HANDLE_TH_ERRORS
}

// acosh
static PyObject * THPVariable_acosh(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "acosh");
  }
  // aten::acosh(Tensor self) -> Tensor

  auto dispatch_acosh = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.acosh();
  };
  return wrap(dispatch_acosh(self));
  END_HANDLE_TH_ERRORS
}

// acosh_
static PyObject * THPVariable_acosh_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "acosh_");
  }
  // aten::acosh_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_acosh_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.acosh_();
  };
  return wrap(dispatch_acosh_(self));
  END_HANDLE_TH_ERRORS
}

\
// add
static PyObject * THPVariable_add(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "add(Scalar alpha, Tensor other)|deprecated",
    "add(Tensor other, *, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::add(Tensor self, Scalar alpha, Tensor other) -> Tensor

      auto dispatch_add = [](const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.add(other, alpha);
      };
      return wrap(dispatch_add(self, _r.scalar(0), _r.tensor(1)));
    }
    case 1: {
      // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor

      auto dispatch_add = [](const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.add(other, alpha);
      };
      return wrap(dispatch_add(self, _r.tensor(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
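// Both add overloads above compute self + alpha * other; the deprecated
// positional form only reorders the arguments before forwarding to the same
// ATen call, as the identical lambda bodies show. The same relationship holds
// for the in-place add_ variant that follows.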

\
// add_
static PyObject * THPVariable_add_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "add_(Scalar alpha, Tensor other)|deprecated",
    "add_(Tensor other, *, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::add_(Tensor(a!) self, Scalar alpha, Tensor other) -> Tensor(a!)

      auto dispatch_add_ = [](const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.add_(other, alpha);
      };
      return wrap(dispatch_add_(self, _r.scalar(0), _r.tensor(1)));
    }
    case 1: {
      // aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)

      auto dispatch_add_ = [](const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.add_(other, alpha);
      };
      return wrap(dispatch_add_(self, _r.tensor(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// addbmm
static PyObject * THPVariable_addbmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addbmm(Scalar beta, Scalar alpha, Tensor batch1, Tensor batch2)|deprecated",
    "addbmm(Scalar beta, Tensor batch1, Tensor batch2)|deprecated",
    "addbmm(Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor

      auto dispatch_addbmm = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & batch1, const at::Tensor & batch2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addbmm(batch1, batch2, beta, alpha);
      };
      return wrap(dispatch_addbmm(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::addbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2) -> Tensor

      auto dispatch_addbmm = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addbmm(batch1, batch2, beta, 1);
      };
      return wrap(dispatch_addbmm(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor

      auto dispatch_addbmm = [](const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addbmm(batch1, batch2, beta, alpha);
      };
      return wrap(dispatch_addbmm(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
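// NOTE: addbmm reduces over the batch dimension: out = beta * self +
// alpha * sum_i (batch1[i] @ batch2[i]). In the deprecated beta-only overload,
// alpha is pinned to 1 (see the literal 1 passed in its lambda above).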

\
// addbmm_
static PyObject * THPVariable_addbmm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addbmm_(Scalar beta, Scalar alpha, Tensor batch1, Tensor batch2)|deprecated",
    "addbmm_(Scalar beta, Tensor batch1, Tensor batch2)|deprecated",
    "addbmm_(Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addbmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor(a!)

      auto dispatch_addbmm_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & batch1, const at::Tensor & batch2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addbmm_(batch1, batch2, beta, alpha);
      };
      return wrap(dispatch_addbmm_(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::addbmm_(Scalar beta, Tensor(a!) self, Tensor batch1, Tensor batch2) -> Tensor(a!)

      auto dispatch_addbmm_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addbmm_(batch1, batch2, beta, 1);
      };
      return wrap(dispatch_addbmm_(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)

      auto dispatch_addbmm_ = [](const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addbmm_(batch1, batch2, beta, alpha);
      };
      return wrap(dispatch_addbmm_(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// addcdiv
static PyObject * THPVariable_addcdiv(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addcdiv(Scalar value, Tensor tensor1, Tensor tensor2)|deprecated",
    "addcdiv(Tensor tensor1, Tensor tensor2, *, Scalar value=1)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addcdiv(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor

      auto dispatch_addcdiv = [](const at::Tensor & self, const at::Scalar & value, const at::Tensor & tensor1, const at::Tensor & tensor2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addcdiv(tensor1, tensor2, value);
      };
      return wrap(dispatch_addcdiv(self, _r.scalar(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor

      auto dispatch_addcdiv = [](const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addcdiv(tensor1, tensor2, value);
      };
      return wrap(dispatch_addcdiv(self, _r.tensor(0), _r.tensor(1), _r.scalar(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
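// NOTE: addcdiv computes the elementwise out = self + value * tensor1 / tensor2;
// its deprecated overload merely moves `value` to the front. addcmul below is
// the multiplicative counterpart: out = self + value * tensor1 * tensor2.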

\
// addcdiv_
static PyObject * THPVariable_addcdiv_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addcdiv_(Scalar value, Tensor tensor1, Tensor tensor2)|deprecated",
    "addcdiv_(Tensor tensor1, Tensor tensor2, *, Scalar value=1)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addcdiv_(Tensor(a!) self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor(a!)

      auto dispatch_addcdiv_ = [](const at::Tensor & self, const at::Scalar & value, const at::Tensor & tensor1, const at::Tensor & tensor2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addcdiv_(tensor1, tensor2, value);
      };
      return wrap(dispatch_addcdiv_(self, _r.scalar(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)

      auto dispatch_addcdiv_ = [](const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addcdiv_(tensor1, tensor2, value);
      };
      return wrap(dispatch_addcdiv_(self, _r.tensor(0), _r.tensor(1), _r.scalar(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// addcmul
static PyObject * THPVariable_addcmul(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addcmul(Scalar value, Tensor tensor1, Tensor tensor2)|deprecated",
    "addcmul(Tensor tensor1, Tensor tensor2, *, Scalar value=1)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addcmul(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor

      auto dispatch_addcmul = [](const at::Tensor & self, const at::Scalar & value, const at::Tensor & tensor1, const at::Tensor & tensor2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addcmul(tensor1, tensor2, value);
      };
      return wrap(dispatch_addcmul(self, _r.scalar(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor

      auto dispatch_addcmul = [](const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addcmul(tensor1, tensor2, value);
      };
      return wrap(dispatch_addcmul(self, _r.tensor(0), _r.tensor(1), _r.scalar(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// addcmul_
static PyObject * THPVariable_addcmul_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addcmul_(Scalar value, Tensor tensor1, Tensor tensor2)|deprecated",
    "addcmul_(Tensor tensor1, Tensor tensor2, *, Scalar value=1)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addcmul_(Tensor(a!) self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor(a!)

      auto dispatch_addcmul_ = [](const at::Tensor & self, const at::Scalar & value, const at::Tensor & tensor1, const at::Tensor & tensor2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addcmul_(tensor1, tensor2, value);
      };
      return wrap(dispatch_addcmul_(self, _r.scalar(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)

      auto dispatch_addcmul_ = [](const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addcmul_(tensor1, tensor2, value);
      };
      return wrap(dispatch_addcmul_(self, _r.tensor(0), _r.tensor(1), _r.scalar(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// addmm
static PyObject * THPVariable_addmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addmm(Scalar beta, Scalar alpha, Tensor mat1, Tensor mat2)|deprecated",
    "addmm(Scalar beta, Tensor mat1, Tensor mat2)|deprecated",
    "addmm(Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor

      auto dispatch_addmm = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & mat1, const at::Tensor & mat2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmm(mat1, mat2, beta, alpha);
      };
      return wrap(dispatch_addmm(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor

      auto dispatch_addmm = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmm(mat1, mat2, beta, 1);
      };
      return wrap(dispatch_addmm(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor

      auto dispatch_addmm = [](const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmm(mat1, mat2, beta, alpha);
      };
      return wrap(dispatch_addmm(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
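// NOTE: addmm is the fused multiply-accumulate out = beta * self +
// alpha * (mat1 @ mat2); _addmm_activation earlier in this file is the same
// computation followed by an activation.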

\
// addmm_
static PyObject * THPVariable_addmm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addmm_(Scalar beta, Scalar alpha, Tensor mat1, Tensor mat2)|deprecated",
    "addmm_(Scalar beta, Tensor mat1, Tensor mat2)|deprecated",
    "addmm_(Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor(a!)

      auto dispatch_addmm_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & mat1, const at::Tensor & mat2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmm_(mat1, mat2, beta, alpha);
      };
      return wrap(dispatch_addmm_(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::addmm_(Scalar beta, Tensor(a!) self, Tensor mat1, Tensor mat2) -> Tensor(a!)

      auto dispatch_addmm_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmm_(mat1, mat2, beta, 1);
      };
      return wrap(dispatch_addmm_(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)

      auto dispatch_addmm_ = [](const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmm_(mat1, mat2, beta, alpha);
      };
      return wrap(dispatch_addmm_(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// addmv
static PyObject * THPVariable_addmv(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addmv(Scalar beta, Scalar alpha, Tensor mat, Tensor vec)|deprecated",
    "addmv(Scalar beta, Tensor mat, Tensor vec)|deprecated",
    "addmv(Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addmv(Scalar beta, Tensor self, Scalar alpha, Tensor mat, Tensor vec) -> Tensor

      auto dispatch_addmv = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & mat, const at::Tensor & vec) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmv(mat, vec, beta, alpha);
      };
      return wrap(dispatch_addmv(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::addmv(Scalar beta, Tensor self, Tensor mat, Tensor vec) -> Tensor

      auto dispatch_addmv = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmv(mat, vec, beta, 1);
      };
      return wrap(dispatch_addmv(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor

      auto dispatch_addmv = [](const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmv(mat, vec, beta, alpha);
      };
      return wrap(dispatch_addmv(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
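// NOTE: addmv is the matrix-vector variant: out = beta * self +
// alpha * (mat @ vec), where self and the result are 1-D.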

\
// addmv_
static PyObject * THPVariable_addmv_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addmv_(Scalar beta, Scalar alpha, Tensor mat, Tensor vec)|deprecated",
    "addmv_(Scalar beta, Tensor mat, Tensor vec)|deprecated",
    "addmv_(Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addmv_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor mat, Tensor vec) -> Tensor(a!)

      auto dispatch_addmv_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & mat, const at::Tensor & vec) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmv_(mat, vec, beta, alpha);
      };
      return wrap(dispatch_addmv_(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::addmv_(Scalar beta, Tensor(a!) self, Tensor mat, Tensor vec) -> Tensor(a!)

      auto dispatch_addmv_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmv_(mat, vec, beta, 1);
      };
      return wrap(dispatch_addmv_(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)

      auto dispatch_addmv_ = [](const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addmv_(mat, vec, beta, alpha);
      };
      return wrap(dispatch_addmv_(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// addr
static PyObject * THPVariable_addr(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "addr(Scalar beta, Scalar alpha, Tensor vec1, Tensor vec2)|deprecated",
    "addr(Scalar beta, Tensor vec1, Tensor vec2)|deprecated",
    "addr(Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::addr(Scalar beta, Tensor self, Scalar alpha, Tensor vec1, Tensor vec2) -> Tensor

      auto dispatch_addr = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & vec1, const at::Tensor & vec2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addr(vec1, vec2, beta, alpha);
      };
      return wrap(dispatch_addr(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::addr(Scalar beta, Tensor self, Tensor vec1, Tensor vec2) -> Tensor

      auto dispatch_addr = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addr(vec1, vec2, beta, 1);
      };
      return wrap(dispatch_addr(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor

      auto dispatch_addr = [](const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.addr(vec1, vec2, beta, alpha);
      };
      return wrap(dispatch_addr(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
3103
3104\
3105// addr_
3106static PyObject * THPVariable_addr_(PyObject* self_, PyObject* args, PyObject* kwargs)
3107{
3108 HANDLE_TH_ERRORS
3109 const Tensor& self = THPVariable_Unpack(self_);
3110 static PythonArgParser parser({
3111 "addr_(Scalar beta, Scalar alpha, Tensor vec1, Tensor vec2)|deprecated",
3112 "addr_(Scalar beta, Tensor vec1, Tensor vec2)|deprecated",
3113 "addr_(Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1)",
3114 }, /*traceable=*/true);
3115
3116 ParsedArgs<4> parsed_args;
3117 auto _r = parser.parse(self_, args, kwargs, parsed_args);
3118 if(_r.has_torch_function()) {
3119 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
3120 }
3121 switch (_r.idx) {
3122 case 0: {
3123 // [deprecated] aten::addr_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor vec1, Tensor vec2) -> Tensor(a!)
3124
3125 auto dispatch_addr_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & vec1, const at::Tensor & vec2) -> at::Tensor {
3126 pybind11::gil_scoped_release no_gil;
3127 return self.addr_(vec1, vec2, beta, alpha);
3128 };
3129 return wrap(dispatch_addr_(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
3130 }
3131 case 1: {
3132 // [deprecated] aten::addr_(Scalar beta, Tensor(a!) self, Tensor vec1, Tensor vec2) -> Tensor(a!)
3133
3134 auto dispatch_addr_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2) -> at::Tensor {
3135 pybind11::gil_scoped_release no_gil;
3136 return self.addr_(vec1, vec2, beta, 1);
3137 };
3138 return wrap(dispatch_addr_(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
3139 }
3140 case 2: {
3141 // aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
3142
3143 auto dispatch_addr_ = [](const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
3144 pybind11::gil_scoped_release no_gil;
3145 return self.addr_(vec1, vec2, beta, alpha);
3146 };
3147 return wrap(dispatch_addr_(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
3148 }
3149 }
3150 Py_RETURN_NONE;
3151 END_HANDLE_TH_ERRORS
3152}
3153
3154// adjoint
3155static PyObject * THPVariable_adjoint(PyObject* self_, PyObject* args)
3156{
3157 HANDLE_TH_ERRORS
3158 const Tensor& self = THPVariable_Unpack(self_);
3159 if(check_has_torch_function(self_)) {
3160 return handle_torch_function(self_, "adjoint");
3161 }
3162 // aten::adjoint(Tensor(a) self) -> Tensor(a)
3163
3164 auto dispatch_adjoint = [](const at::Tensor & self) -> at::Tensor {
3165 pybind11::gil_scoped_release no_gil;
3166 return self.adjoint();
3167 };
3168 return wrap(dispatch_adjoint(self));
3169 END_HANDLE_TH_ERRORS
3170}
3171
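// NOTE [ Zero-argument methods ]
// Methods that take no arguments (adjoint above, angle and the arc* family
// below) skip PythonArgParser entirely: check_has_torch_function(self_)
// covers __torch_function__ overrides, and the dispatch lambda releases the
// GIL and calls straight into the ATen method on the unpacked self tensor.
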
// align_as
static PyObject * THPVariable_align_as(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "align_as(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::align_as(Tensor self, Tensor other) -> Tensor

  auto dispatch_align_as = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.align_as(other);
  };
  return wrap(dispatch_align_as(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// align_to
static PyObject * THPVariable_align_to(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "align_to(DimnameList names)",
    "align_to(DimnameList order, int64_t ellipsis_idx)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)

      auto dispatch_align_to = [](const at::Tensor & self, at::DimnameList names) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.align_to(names);
      };
      return wrap(dispatch_align_to(self, _r.dimnamelist(0)));
    }
    case 1: {
      // aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)

      auto dispatch_align_to = [](const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.align_to(order, ellipsis_idx);
      };
      return wrap(dispatch_align_to(self, _r.dimnamelist(0), _r.toInt64(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// all
static PyObject * THPVariable_all(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "all()",
    "all(int64_t dim, bool keepdim=False)",
    "all(Dimname dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::all(Tensor self) -> Tensor

      auto dispatch_all = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.all();
      };
      return wrap(dispatch_all(self));
    }
    case 1: {
      // aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor

      auto dispatch_all = [](const at::Tensor & self, int64_t dim, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.all(dim, keepdim);
      };
      return wrap(dispatch_all(self, _r.toInt64(0), _r.toBool(1)));
    }
    case 2: {
      // aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor

      auto dispatch_all = [](const at::Tensor & self, at::Dimname dim, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.all(dim, keepdim);
      };
      return wrap(dispatch_all(self, _r.dimname(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// allclose
static PyObject * THPVariable_allclose(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "allclose(Tensor other, double rtol=1e-05, double atol=1e-08, bool equal_nan=False)",
  }, /*traceable=*/false);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool

  auto dispatch_allclose = [](const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.allclose(other, rtol, atol, equal_nan);
  };
  return wrap(dispatch_allclose(self, _r.tensor(0), _r.toDouble(1), _r.toDouble(2), _r.toBool(3)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

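// NOTE [ Non-Tensor returns ]
// allclose is one of the few methods here whose dispatch lambda does not
// return at::Tensor: it returns bool, which wrap() converts to a Python
// bool. Such bindings are registered with /*traceable=*/false, since there
// is no Tensor output for the tracer to record.
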
// amax
static PyObject * THPVariable_amax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "amax(IntArrayRef[1] dim=None, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor

  auto dispatch_amax = [](const at::Tensor & self, at::IntArrayRef dim, bool keepdim) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.amax(dim, keepdim);
  };
  return wrap(dispatch_amax(self, _r.intlist(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// amin
static PyObject * THPVariable_amin(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "amin(IntArrayRef[1] dim=None, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor

  auto dispatch_amin = [](const at::Tensor & self, at::IntArrayRef dim, bool keepdim) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.amin(dim, keepdim);
  };
  return wrap(dispatch_amin(self, _r.intlist(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

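// NOTE [ IntArrayRef[1] dims ]
// amax/amin declare dim as IntArrayRef[1]: the parser promotes a bare
// Python int to a one-element list, and an omitted dim falls back to the
// schema default [], which the ATen kernels treat as "reduce over all
// dimensions". Illustrative (not part of this file):
//   t.amax()     # all dims
//   t.amax(1)    # same as t.amax(dim=[1])
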
// aminmax
static PyObject * THPVariable_aminmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("aminmax");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "aminmax(*, int64_t? dim=None, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)

  auto dispatch_aminmax = [](const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
    pybind11::gil_scoped_release no_gil;
    return self.aminmax(dim, keepdim);
  };
  return wrap(NamedTuple, dispatch_aminmax(self, _r.toInt64Optional(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

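// NOTE [ Structseq returns ]
// aminmax returns a (min, max) pair. get_namedtuple("aminmax") builds the
// torch.return_types.aminmax structseq type once (cached in the
// function-local static above), and wrap(NamedTuple, ...) packs the
// std::tuple of two Tensors into an instance of it.
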
// angle
static PyObject * THPVariable_angle(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "angle");
  }
  // aten::angle(Tensor self) -> Tensor

  auto dispatch_angle = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.angle();
  };
  return wrap(dispatch_angle(self));
  END_HANDLE_TH_ERRORS
}

// any
static PyObject * THPVariable_any(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "any()",
    "any(int64_t dim, bool keepdim=False)",
    "any(Dimname dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::any(Tensor self) -> Tensor

      auto dispatch_any = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.any();
      };
      return wrap(dispatch_any(self));
    }
    case 1: {
      // aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor

      auto dispatch_any = [](const at::Tensor & self, int64_t dim, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.any(dim, keepdim);
      };
      return wrap(dispatch_any(self, _r.toInt64(0), _r.toBool(1)));
    }
    case 2: {
      // aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor

      auto dispatch_any = [](const at::Tensor & self, at::Dimname dim, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.any(dim, keepdim);
      };
      return wrap(dispatch_any(self, _r.dimname(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// arccos
static PyObject * THPVariable_arccos(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arccos");
  }
  // aten::arccos(Tensor self) -> Tensor

  auto dispatch_arccos = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arccos();
  };
  return wrap(dispatch_arccos(self));
  END_HANDLE_TH_ERRORS
}

// arccos_
static PyObject * THPVariable_arccos_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arccos_");
  }
  // aten::arccos_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_arccos_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arccos_();
  };
  return wrap(dispatch_arccos_(self));
  END_HANDLE_TH_ERRORS
}

// arccosh
static PyObject * THPVariable_arccosh(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arccosh");
  }
  // aten::arccosh(Tensor self) -> Tensor

  auto dispatch_arccosh = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arccosh();
  };
  return wrap(dispatch_arccosh(self));
  END_HANDLE_TH_ERRORS
}

// arccosh_
static PyObject * THPVariable_arccosh_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arccosh_");
  }
  // aten::arccosh_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_arccosh_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arccosh_();
  };
  return wrap(dispatch_arccosh_(self));
  END_HANDLE_TH_ERRORS
}

// arcsin
static PyObject * THPVariable_arcsin(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arcsin");
  }
  // aten::arcsin(Tensor self) -> Tensor

  auto dispatch_arcsin = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arcsin();
  };
  return wrap(dispatch_arcsin(self));
  END_HANDLE_TH_ERRORS
}

// arcsin_
static PyObject * THPVariable_arcsin_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arcsin_");
  }
  // aten::arcsin_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_arcsin_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arcsin_();
  };
  return wrap(dispatch_arcsin_(self));
  END_HANDLE_TH_ERRORS
}

// arcsinh
static PyObject * THPVariable_arcsinh(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arcsinh");
  }
  // aten::arcsinh(Tensor self) -> Tensor

  auto dispatch_arcsinh = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arcsinh();
  };
  return wrap(dispatch_arcsinh(self));
  END_HANDLE_TH_ERRORS
}

// arcsinh_
static PyObject * THPVariable_arcsinh_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arcsinh_");
  }
  // aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_arcsinh_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arcsinh_();
  };
  return wrap(dispatch_arcsinh_(self));
  END_HANDLE_TH_ERRORS
}

// arctan
static PyObject * THPVariable_arctan(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arctan");
  }
  // aten::arctan(Tensor self) -> Tensor

  auto dispatch_arctan = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arctan();
  };
  return wrap(dispatch_arctan(self));
  END_HANDLE_TH_ERRORS
}

// arctan2
static PyObject * THPVariable_arctan2(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "arctan2(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::arctan2(Tensor self, Tensor other) -> Tensor

  auto dispatch_arctan2 = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arctan2(other);
  };
  return wrap(dispatch_arctan2(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// arctan2_
static PyObject * THPVariable_arctan2_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "arctan2_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_arctan2_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arctan2_(other);
  };
  return wrap(dispatch_arctan2_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

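// NOTE [ Method aliases ]
// The arc* spellings are aliases: aten::arctan2 forwards to the same kernel
// as aten::atan2, and likewise for the arccos/acos-style pairs in this file,
// so the paired bindings differ only in the Python-visible name.
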
// arctan_
static PyObject * THPVariable_arctan_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arctan_");
  }
  // aten::arctan_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_arctan_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arctan_();
  };
  return wrap(dispatch_arctan_(self));
  END_HANDLE_TH_ERRORS
}

// arctanh
static PyObject * THPVariable_arctanh(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arctanh");
  }
  // aten::arctanh(Tensor self) -> Tensor

  auto dispatch_arctanh = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arctanh();
  };
  return wrap(dispatch_arctanh(self));
  END_HANDLE_TH_ERRORS
}

// arctanh_
static PyObject * THPVariable_arctanh_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "arctanh_");
  }
  // aten::arctanh_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_arctanh_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.arctanh_();
  };
  return wrap(dispatch_arctanh_(self));
  END_HANDLE_TH_ERRORS
}

// argmax
static PyObject * THPVariable_argmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "argmax(int64_t? dim=None, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor

  auto dispatch_argmax = [](const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.argmax(dim, keepdim);
  };
  return wrap(dispatch_argmax(self, _r.toInt64Optional(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// argmin
static PyObject * THPVariable_argmin(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "argmin(int64_t? dim=None, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor

  auto dispatch_argmin = [](const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.argmin(dim, keepdim);
  };
  return wrap(dispatch_argmin(self, _r.toInt64Optional(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// argsort
static PyObject * THPVariable_argsort(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "argsort(*, bool stable, int64_t dim=-1, bool descending=False)",
    "argsort(int64_t dim=-1, bool descending=False)",
    "argsort(Dimname dim, bool descending=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor

      auto dispatch_argsort = [](const at::Tensor & self, bool stable, int64_t dim, bool descending) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.argsort(stable, dim, descending);
      };
      return wrap(dispatch_argsort(self, _r.toBool(0), _r.toInt64(1), _r.toBool(2)));
    }
    case 1: {
      // aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor

      auto dispatch_argsort = [](const at::Tensor & self, int64_t dim, bool descending) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.argsort(dim, descending);
      };
      return wrap(dispatch_argsort(self, _r.toInt64(0), _r.toBool(1)));
    }
    case 2: {
      // aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor

      auto dispatch_argsort = [](const at::Tensor & self, at::Dimname dim, bool descending) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.argsort(dim, descending);
      };
      return wrap(dispatch_argsort(self, _r.dimname(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

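// NOTE [ Overload ordering ]
// Signature order matters: argsort lists the keyword-only `stable` overload
// first, then the int-dim and Dimname overloads, and PythonArgParser picks
// the first signature that binds. A plain t.argsort() therefore falls
// through to case 1, while t.argsort(stable=True) matches case 0
// (illustrative calls; t is any tensor).
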
// argwhere
static PyObject * THPVariable_argwhere(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "argwhere");
  }
  // aten::argwhere(Tensor self) -> Tensor

  auto dispatch_argwhere = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.argwhere();
  };
  return wrap(dispatch_argwhere(self));
  END_HANDLE_TH_ERRORS
}

// as_strided
static PyObject * THPVariable_as_strided(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "as_strided(SymIntArrayRef size, SymIntArrayRef stride, SymInt? storage_offset=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)

  auto dispatch_as_strided = [](const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.as_strided_symint(size, stride, storage_offset);
  };
  return wrap(dispatch_as_strided(self, _r.symintlist(0), _r.symintlist(1), _r.toSymIntOptional(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// as_strided_
static PyObject * THPVariable_as_strided_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "as_strided_(SymIntArrayRef size, SymIntArrayRef stride, SymInt? storage_offset=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)

  auto dispatch_as_strided_ = [](const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.as_strided__symint(size, stride, storage_offset);
  };
  return wrap(dispatch_as_strided_(self, _r.symintlist(0), _r.symintlist(1), _r.toSymIntOptional(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// as_strided_scatter
static PyObject * THPVariable_as_strided_scatter(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "as_strided_scatter(Tensor src, SymIntArrayRef size, SymIntArrayRef stride, SymInt? storage_offset=None)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor

  auto dispatch_as_strided_scatter = [](const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.as_strided_scatter_symint(src, size, stride, storage_offset);
  };
  return wrap(dispatch_as_strided_scatter(self, _r.tensor(0), _r.symintlist(1), _r.symintlist(2), _r.toSymIntOptional(3)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

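// NOTE [ SymInt arguments ]
// The as_strided family parses size/stride as SymIntArrayRef and routes to
// the *_symint ATen entry points, so symbolic shapes can flow through under
// symbolic tracing; in eager mode _r.symintlist() simply carries concrete
// integers.
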
// asin
static PyObject * THPVariable_asin(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "asin");
  }
  // aten::asin(Tensor self) -> Tensor

  auto dispatch_asin = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.asin();
  };
  return wrap(dispatch_asin(self));
  END_HANDLE_TH_ERRORS
}

// asin_
static PyObject * THPVariable_asin_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "asin_");
  }
  // aten::asin_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_asin_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.asin_();
  };
  return wrap(dispatch_asin_(self));
  END_HANDLE_TH_ERRORS
}

// asinh
static PyObject * THPVariable_asinh(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "asinh");
  }
  // aten::asinh(Tensor self) -> Tensor

  auto dispatch_asinh = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.asinh();
  };
  return wrap(dispatch_asinh(self));
  END_HANDLE_TH_ERRORS
}

// asinh_
static PyObject * THPVariable_asinh_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "asinh_");
  }
  // aten::asinh_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_asinh_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.asinh_();
  };
  return wrap(dispatch_asinh_(self));
  END_HANDLE_TH_ERRORS
}

// atan
static PyObject * THPVariable_atan(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "atan");
  }
  // aten::atan(Tensor self) -> Tensor

  auto dispatch_atan = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.atan();
  };
  return wrap(dispatch_atan(self));
  END_HANDLE_TH_ERRORS
}

// atan2
static PyObject * THPVariable_atan2(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "atan2(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::atan2(Tensor self, Tensor other) -> Tensor

  auto dispatch_atan2 = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.atan2(other);
  };
  return wrap(dispatch_atan2(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// atan2_
static PyObject * THPVariable_atan2_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "atan2_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_atan2_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.atan2_(other);
  };
  return wrap(dispatch_atan2_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// atan_
static PyObject * THPVariable_atan_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "atan_");
  }
  // aten::atan_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_atan_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.atan_();
  };
  return wrap(dispatch_atan_(self));
  END_HANDLE_TH_ERRORS
}

// atanh
static PyObject * THPVariable_atanh(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "atanh");
  }
  // aten::atanh(Tensor self) -> Tensor

  auto dispatch_atanh = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.atanh();
  };
  return wrap(dispatch_atanh(self));
  END_HANDLE_TH_ERRORS
}

// atanh_
static PyObject * THPVariable_atanh_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "atanh_");
  }
  // aten::atanh_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_atanh_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.atanh_();
  };
  return wrap(dispatch_atanh_(self));
  END_HANDLE_TH_ERRORS
}

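// NOTE [ In-place variants ]
// Trailing-underscore methods (asin_, atan_, atanh_, ...) bind the mutating
// aten::*_ overloads: the schema marks self as Tensor(a!) and the same
// tensor is handed back, so chained Python calls such as t.atan_().neg_()
// (illustrative) mutate t in place.
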
// baddbmm
static PyObject * THPVariable_baddbmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "baddbmm(Scalar beta, Scalar alpha, Tensor batch1, Tensor batch2)|deprecated",
    "baddbmm(Scalar beta, Tensor batch1, Tensor batch2)|deprecated",
    "baddbmm(Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::baddbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor

      auto dispatch_baddbmm = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & batch1, const at::Tensor & batch2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.baddbmm(batch1, batch2, beta, alpha);
      };
      return wrap(dispatch_baddbmm(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::baddbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2) -> Tensor

      auto dispatch_baddbmm = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.baddbmm(batch1, batch2, beta, 1);
      };
      return wrap(dispatch_baddbmm(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor

      auto dispatch_baddbmm = [](const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.baddbmm(batch1, batch2, beta, alpha);
      };
      return wrap(dispatch_baddbmm(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// baddbmm_
static PyObject * THPVariable_baddbmm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "baddbmm_(Scalar beta, Scalar alpha, Tensor batch1, Tensor batch2)|deprecated",
    "baddbmm_(Scalar beta, Tensor batch1, Tensor batch2)|deprecated",
    "baddbmm_(Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::baddbmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor(a!)

      auto dispatch_baddbmm_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & batch1, const at::Tensor & batch2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.baddbmm_(batch1, batch2, beta, alpha);
      };
      return wrap(dispatch_baddbmm_(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::baddbmm_(Scalar beta, Tensor(a!) self, Tensor batch1, Tensor batch2) -> Tensor(a!)

      auto dispatch_baddbmm_ = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.baddbmm_(batch1, batch2, beta, 1);
      };
      return wrap(dispatch_baddbmm_(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)

      auto dispatch_baddbmm_ = [](const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.baddbmm_(batch1, batch2, beta, alpha);
      };
      return wrap(dispatch_baddbmm_(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// bernoulli
static PyObject * THPVariable_bernoulli(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bernoulli(*, Generator? generator=None)",
    "bernoulli(double p, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor

      auto dispatch_bernoulli = [](const at::Tensor & self, c10::optional<at::Generator> generator) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bernoulli(generator);
      };
      return wrap(dispatch_bernoulli(self, _r.generator(0)));
    }
    case 1: {
      // aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor

      auto dispatch_bernoulli = [](const at::Tensor & self, double p, c10::optional<at::Generator> generator) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bernoulli(p, generator);
      };
      return wrap(dispatch_bernoulli(self, _r.toDouble(0), _r.generator(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// bernoulli_
static PyObject * THPVariable_bernoulli_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bernoulli_(Tensor p, *, Generator? generator=None)",
    "bernoulli_(double p=0.5, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)

      auto dispatch_bernoulli_ = [](const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bernoulli_(p, generator);
      };
      return wrap(dispatch_bernoulli_(self, _r.tensor(0), _r.generator(1)));
    }
    case 1: {
      // aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)

      auto dispatch_bernoulli_ = [](const at::Tensor & self, double p, c10::optional<at::Generator> generator) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bernoulli_(p, generator);
      };
      return wrap(dispatch_bernoulli_(self, _r.toDouble(0), _r.generator(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

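// NOTE [ Generator arguments ]
// The random methods thread an optional at::Generator through to ATen;
// _r.generator(i) yields an empty optional when the keyword is omitted, in
// which case the kernel falls back to the default generator for the
// tensor's device. Illustrative (not part of this file):
//   g = torch.Generator().manual_seed(0)
//   t.bernoulli_(0.5, generator=g)
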
// bincount
static PyObject * THPVariable_bincount(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bincount(Tensor? weights=None, int64_t minlength=0)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor

  auto dispatch_bincount = [](const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.bincount(weights, minlength);
  };
  return wrap(dispatch_bincount(self, _r.optionalTensor(0), _r.toInt64(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

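// NOTE [ Optional Tensor arguments ]
// bincount's weights correspond to the schema type Tensor?: the binding
// parses them with _r.optionalTensor(0), so passing None from Python
// arrives as an empty c10::optional<at::Tensor> rather than as an
// undefined Tensor.
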
// bitwise_and
static PyObject * THPVariable_bitwise_and(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bitwise_and(Tensor other)",
    "bitwise_and(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_bitwise_and = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_and(other);
      };
      return wrap(dispatch_bitwise_and(self, _r.tensor(0)));
    }
    case 1: {
      // aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_bitwise_and = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_and(other);
      };
      return wrap(dispatch_bitwise_and(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// bitwise_and_
static PyObject * THPVariable_bitwise_and_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bitwise_and_(Tensor other)",
    "bitwise_and_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_bitwise_and_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_and_(other);
      };
      return wrap(dispatch_bitwise_and_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_bitwise_and_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_and_(other);
      };
      return wrap(dispatch_bitwise_and_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

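// NOTE [ Tensor/Scalar overload pairs ]
// Each bitwise_* binding exposes a Tensor overload and a Scalar overload
// that differ only in how the single operand parses; both funnel into the
// same ATen method, which performs the usual type promotion. Thus
// t.bitwise_and(u) and t.bitwise_and(3) (illustrative) reach the same
// kernel via different cases of the same switch.
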
4394\
4395// bitwise_left_shift
4396static PyObject * THPVariable_bitwise_left_shift(PyObject* self_, PyObject* args, PyObject* kwargs)
4397{
4398 HANDLE_TH_ERRORS
4399 const Tensor& self = THPVariable_Unpack(self_);
4400 static PythonArgParser parser({
4401 "bitwise_left_shift(Tensor other)",
4402 "bitwise_left_shift(Scalar other)",
4403 }, /*traceable=*/true);
4404
4405 ParsedArgs<1> parsed_args;
4406 auto _r = parser.parse(self_, args, kwargs, parsed_args);
4407 if(_r.has_torch_function()) {
4408 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
4409 }
4410 switch (_r.idx) {
4411 case 0: {
4412 // aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
4413
4414 auto dispatch_bitwise_left_shift = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
4415 pybind11::gil_scoped_release no_gil;
4416 return self.bitwise_left_shift(other);
4417 };
4418 return wrap(dispatch_bitwise_left_shift(self, _r.tensor(0)));
4419 }
4420 case 1: {
4421 // aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
4422
4423 auto dispatch_bitwise_left_shift = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
4424 pybind11::gil_scoped_release no_gil;
4425 return self.bitwise_left_shift(other);
4426 };
4427 return wrap(dispatch_bitwise_left_shift(self, _r.scalar(0)));
4428 }
4429 }
4430 Py_RETURN_NONE;
4431 END_HANDLE_TH_ERRORS
4432}
4433
4434\
4435// bitwise_left_shift_
4436static PyObject * THPVariable_bitwise_left_shift_(PyObject* self_, PyObject* args, PyObject* kwargs)
4437{
4438 HANDLE_TH_ERRORS
4439 const Tensor& self = THPVariable_Unpack(self_);
4440 static PythonArgParser parser({
4441 "bitwise_left_shift_(Tensor other)",
4442 "bitwise_left_shift_(Scalar other)",
4443 }, /*traceable=*/true);
4444
4445 ParsedArgs<1> parsed_args;
4446 auto _r = parser.parse(self_, args, kwargs, parsed_args);
4447 if(_r.has_torch_function()) {
4448 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
4449 }
4450 switch (_r.idx) {
4451 case 0: {
4452 // aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4453
4454 auto dispatch_bitwise_left_shift_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
4455 pybind11::gil_scoped_release no_gil;
4456 return self.bitwise_left_shift_(other);
4457 };
4458 return wrap(dispatch_bitwise_left_shift_(self, _r.tensor(0)));
4459 }
4460 case 1: {
4461 // aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4462
4463 auto dispatch_bitwise_left_shift_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
4464 pybind11::gil_scoped_release no_gil;
4465 return self.bitwise_left_shift_(other);
4466 };
4467 return wrap(dispatch_bitwise_left_shift_(self, _r.scalar(0)));
4468 }
4469 }
4470 Py_RETURN_NONE;
4471 END_HANDLE_TH_ERRORS
4472}
4473
4474// bitwise_not
4475static PyObject * THPVariable_bitwise_not(PyObject* self_, PyObject* args)
4476{
4477 HANDLE_TH_ERRORS
4478 const Tensor& self = THPVariable_Unpack(self_);
4479 if(check_has_torch_function(self_)) {
4480 return handle_torch_function(self_, "bitwise_not");
4481 }
4482 // aten::bitwise_not(Tensor self) -> Tensor
4483
4484 auto dispatch_bitwise_not = [](const at::Tensor & self) -> at::Tensor {
4485 pybind11::gil_scoped_release no_gil;
4486 return self.bitwise_not();
4487 };
4488 return wrap(dispatch_bitwise_not(self));
4489 END_HANDLE_TH_ERRORS
4490}
4491
4492// bitwise_not_
4493static PyObject * THPVariable_bitwise_not_(PyObject* self_, PyObject* args)
4494{
4495 HANDLE_TH_ERRORS
4496 const Tensor& self = THPVariable_Unpack(self_);
4497 if(check_has_torch_function(self_)) {
4498 return handle_torch_function(self_, "bitwise_not_");
4499 }
4500 // aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
4501
4502 auto dispatch_bitwise_not_ = [](const at::Tensor & self) -> at::Tensor {
4503 pybind11::gil_scoped_release no_gil;
4504 return self.bitwise_not_();
4505 };
4506 return wrap(dispatch_bitwise_not_(self));
4507 END_HANDLE_TH_ERRORS
4508}
4509
4510\
4511// bitwise_or
4512static PyObject * THPVariable_bitwise_or(PyObject* self_, PyObject* args, PyObject* kwargs)
4513{
4514 HANDLE_TH_ERRORS
4515 const Tensor& self = THPVariable_Unpack(self_);
4516 static PythonArgParser parser({
4517 "bitwise_or(Tensor other)",
4518 "bitwise_or(Scalar other)",
4519 }, /*traceable=*/true);
4520
4521 ParsedArgs<1> parsed_args;
4522 auto _r = parser.parse(self_, args, kwargs, parsed_args);
4523 if(_r.has_torch_function()) {
4524 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
4525 }
4526 switch (_r.idx) {
4527 case 0: {
4528 // aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
4529
4530 auto dispatch_bitwise_or = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
4531 pybind11::gil_scoped_release no_gil;
4532 return self.bitwise_or(other);
4533 };
4534 return wrap(dispatch_bitwise_or(self, _r.tensor(0)));
4535 }
4536 case 1: {
4537 // aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
4538
4539 auto dispatch_bitwise_or = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
4540 pybind11::gil_scoped_release no_gil;
4541 return self.bitwise_or(other);
4542 };
4543 return wrap(dispatch_bitwise_or(self, _r.scalar(0)));
4544 }
4545 }
4546 Py_RETURN_NONE;
4547 END_HANDLE_TH_ERRORS
4548}
4549
4550\
// bitwise_or_
static PyObject * THPVariable_bitwise_or_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bitwise_or_(Tensor other)",
    "bitwise_or_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_bitwise_or_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_or_(other);
      };
      return wrap(dispatch_bitwise_or_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_bitwise_or_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_or_(other);
      };
      return wrap(dispatch_bitwise_or_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// bitwise_right_shift
static PyObject * THPVariable_bitwise_right_shift(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bitwise_right_shift(Tensor other)",
    "bitwise_right_shift(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_bitwise_right_shift = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_right_shift(other);
      };
      return wrap(dispatch_bitwise_right_shift(self, _r.tensor(0)));
    }
    case 1: {
      // aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_bitwise_right_shift = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_right_shift(other);
      };
      return wrap(dispatch_bitwise_right_shift(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// bitwise_right_shift_
static PyObject * THPVariable_bitwise_right_shift_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bitwise_right_shift_(Tensor other)",
    "bitwise_right_shift_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_bitwise_right_shift_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_right_shift_(other);
      };
      return wrap(dispatch_bitwise_right_shift_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_bitwise_right_shift_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_right_shift_(other);
      };
      return wrap(dispatch_bitwise_right_shift_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// bitwise_xor
static PyObject * THPVariable_bitwise_xor(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bitwise_xor(Tensor other)",
    "bitwise_xor(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_bitwise_xor = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_xor(other);
      };
      return wrap(dispatch_bitwise_xor(self, _r.tensor(0)));
    }
    case 1: {
      // aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_bitwise_xor = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_xor(other);
      };
      return wrap(dispatch_bitwise_xor(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// bitwise_xor_
static PyObject * THPVariable_bitwise_xor_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bitwise_xor_(Tensor other)",
    "bitwise_xor_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_bitwise_xor_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_xor_(other);
      };
      return wrap(dispatch_bitwise_xor_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_bitwise_xor_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.bitwise_xor_(other);
      };
      return wrap(dispatch_bitwise_xor_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// bmm
static PyObject * THPVariable_bmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "bmm(Tensor mat2)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::bmm(Tensor self, Tensor mat2) -> Tensor

  auto dispatch_bmm = [](const at::Tensor & self, const at::Tensor & mat2) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.bmm(mat2);
  };
  return wrap(dispatch_bmm(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

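// NOTE: bmm is a batched matrix multiply: a (b, n, m) tensor times a
// (b, m, p) tensor yields (b, n, p). Illustrative ATen sketch (not part of
// the generated bindings):
//
//   at::Tensor x = at::randn({10, 3, 4});
//   at::Tensor y = at::randn({10, 4, 5});
//   at::Tensor z = x.bmm(y);   // z.sizes() == {10, 3, 5}
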
// broadcast_to
static PyObject * THPVariable_broadcast_to(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "broadcast_to(SymIntArrayRef size)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)

  auto dispatch_broadcast_to = [](const at::Tensor & self, c10::SymIntArrayRef size) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.broadcast_to_symint(size);
  };
  return wrap(dispatch_broadcast_to(self, _r.symintlist(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

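// NOTE: the size argument is parsed as SymIntArrayRef so that symbolic sizes
// survive under symbolic tracing; _r.symintlist(0) degrades to concrete ints
// otherwise. Illustrative sketch of the resulting broadcast view (assumes
// <ATen/ATen.h>):
//
//   at::Tensor v = at::randn({3});
//   at::Tensor m = v.broadcast_to({4, 3});   // view, no data copy
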
// cauchy_
static PyObject * THPVariable_cauchy_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cauchy_(double median=0, double sigma=1, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)

  auto dispatch_cauchy_ = [](const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cauchy_(median, sigma, generator);
  };
  return wrap(dispatch_cauchy_(self, _r.toDouble(0), _r.toDouble(1), _r.generator(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

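// NOTE: cauchy_ fills self in place with samples from a Cauchy distribution
// with location `median` and scale `sigma`; the optional Generator selects
// the RNG stream. Illustrative sketch:
//
//   at::Tensor t = at::empty({5});
//   t.cauchy_(/*median=*/0.0, /*sigma=*/1.0);   // default generator
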
// ccol_indices
static PyObject * THPVariable_ccol_indices(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "ccol_indices");
  }
  // aten::ccol_indices(Tensor(a) self) -> Tensor(a)

  auto dispatch_ccol_indices = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.ccol_indices();
  };
  return wrap(dispatch_ccol_indices(self));
  END_HANDLE_TH_ERRORS
}

// ceil
static PyObject * THPVariable_ceil(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "ceil");
  }
  // aten::ceil(Tensor self) -> Tensor

  auto dispatch_ceil = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.ceil();
  };
  return wrap(dispatch_ceil(self));
  END_HANDLE_TH_ERRORS
}

// ceil_
static PyObject * THPVariable_ceil_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "ceil_");
  }
  // aten::ceil_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_ceil_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.ceil_();
  };
  return wrap(dispatch_ceil_(self));
  END_HANDLE_TH_ERRORS
}

// chalf
static PyObject * THPVariable_chalf(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "chalf(*, MemoryFormat? memory_format=None)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor

  auto dispatch_chalf = [](const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.chalf(memory_format);
  };
  return wrap(dispatch_chalf(self, _r.memoryformatOptional(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

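// NOTE: chalf converts self to the ComplexHalf (torch.complex32) dtype, with
// an optional MemoryFormat for the result. Illustrative sketch:
//
//   at::Tensor r = at::randn({2, 2});
//   at::Tensor c = r.chalf();   // c.scalar_type() == at::kComplexHalf
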
// cholesky
static PyObject * THPVariable_cholesky(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cholesky(bool upper=False)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::cholesky(Tensor self, bool upper=False) -> Tensor

  auto dispatch_cholesky = [](const at::Tensor & self, bool upper) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cholesky(upper);
  };
  return wrap(dispatch_cholesky(self, _r.toBool(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// cholesky_inverse
static PyObject * THPVariable_cholesky_inverse(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cholesky_inverse(bool upper=False)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor

  auto dispatch_cholesky_inverse = [](const at::Tensor & self, bool upper) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cholesky_inverse(upper);
  };
  return wrap(dispatch_cholesky_inverse(self, _r.toBool(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// cholesky_solve
static PyObject * THPVariable_cholesky_solve(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cholesky_solve(Tensor input2, bool upper=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor

  auto dispatch_cholesky_solve = [](const at::Tensor & self, const at::Tensor & input2, bool upper) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cholesky_solve(input2, upper);
  };
  return wrap(dispatch_cholesky_solve(self, _r.tensor(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

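// NOTE: cholesky_solve solves A X = B where B is self and input2 is a
// Cholesky factor of A; `upper` states whether input2 is upper- or
// lower-triangular. Illustrative sketch:
//
//   at::Tensor a = at::randn({3, 3});
//   a = a.matmul(a.t()) + at::eye(3) * 3;        // make A symmetric positive definite
//   at::Tensor l = a.cholesky(/*upper=*/false);  // lower-triangular factor
//   at::Tensor b = at::randn({3, 2});
//   at::Tensor x = b.cholesky_solve(l, /*upper=*/false);   // A.matmul(x) ~ b
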
// chunk
static PyObject * THPVariable_chunk(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "chunk(int64_t chunks, int64_t dim=0)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]

  auto dispatch_chunk = [](const at::Tensor & self, int64_t chunks, int64_t dim) -> ::std::vector<at::Tensor> {
    pybind11::gil_scoped_release no_gil;
    return self.chunk(chunks, dim);
  };
  return wrap(dispatch_chunk(self, _r.toInt64(0), _r.toInt64(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

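// NOTE: chunk returns up to `chunks` views of self along `dim`; the last
// piece is smaller when the dimension is not evenly divisible. Illustrative
// sketch:
//
//   at::Tensor t = at::arange(10);
//   std::vector<at::Tensor> parts = t.chunk(3);   // sizes 4, 4, 2
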
// clamp
static PyObject * THPVariable_clamp(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clamp(Tensor? min=None, Tensor? max=None)",
    "clamp(Scalar? min=None, Scalar? max=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor

      auto dispatch_clamp = [](const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp(min, max);
      };
      return wrap(dispatch_clamp(self, _r.optionalTensor(0), _r.optionalTensor(1)));
    }
    case 1: {
      // aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor

      auto dispatch_clamp = [](const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp(min, max);
      };
      return wrap(dispatch_clamp(self, _r.scalarOptional(0), _r.scalarOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// clamp_
static PyObject * THPVariable_clamp_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clamp_(Tensor? min=None, Tensor? max=None)",
    "clamp_(Scalar? min=None, Scalar? max=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)

      auto dispatch_clamp_ = [](const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_(min, max);
      };
      return wrap(dispatch_clamp_(self, _r.optionalTensor(0), _r.optionalTensor(1)));
    }
    case 1: {
      // aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)

      auto dispatch_clamp_ = [](const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_(min, max);
      };
      return wrap(dispatch_clamp_(self, _r.scalarOptional(0), _r.scalarOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// clamp_max
static PyObject * THPVariable_clamp_max(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clamp_max(Tensor max)",
    "clamp_max(Scalar max)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor

      auto dispatch_clamp_max = [](const at::Tensor & self, const at::Tensor & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_max(max);
      };
      return wrap(dispatch_clamp_max(self, _r.tensor(0)));
    }
    case 1: {
      // aten::clamp_max(Tensor self, Scalar max) -> Tensor

      auto dispatch_clamp_max = [](const at::Tensor & self, const at::Scalar & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_max(max);
      };
      return wrap(dispatch_clamp_max(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// clamp_max_
static PyObject * THPVariable_clamp_max_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clamp_max_(Tensor max)",
    "clamp_max_(Scalar max)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)

      auto dispatch_clamp_max_ = [](const at::Tensor & self, const at::Tensor & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_max_(max);
      };
      return wrap(dispatch_clamp_max_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)

      auto dispatch_clamp_max_ = [](const at::Tensor & self, const at::Scalar & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_max_(max);
      };
      return wrap(dispatch_clamp_max_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// clamp_min
static PyObject * THPVariable_clamp_min(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clamp_min(Tensor min)",
    "clamp_min(Scalar min)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor

      auto dispatch_clamp_min = [](const at::Tensor & self, const at::Tensor & min) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_min(min);
      };
      return wrap(dispatch_clamp_min(self, _r.tensor(0)));
    }
    case 1: {
      // aten::clamp_min(Tensor self, Scalar min) -> Tensor

      auto dispatch_clamp_min = [](const at::Tensor & self, const at::Scalar & min) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_min(min);
      };
      return wrap(dispatch_clamp_min(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// clamp_min_
static PyObject * THPVariable_clamp_min_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clamp_min_(Tensor min)",
    "clamp_min_(Scalar min)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)

      auto dispatch_clamp_min_ = [](const at::Tensor & self, const at::Tensor & min) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_min_(min);
      };
      return wrap(dispatch_clamp_min_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)

      auto dispatch_clamp_min_ = [](const at::Tensor & self, const at::Scalar & min) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clamp_min_(min);
      };
      return wrap(dispatch_clamp_min_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// clip
static PyObject * THPVariable_clip(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clip(Tensor? min=None, Tensor? max=None)",
    "clip(Scalar? min=None, Scalar? max=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor

      auto dispatch_clip = [](const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clip(min, max);
      };
      return wrap(dispatch_clip(self, _r.optionalTensor(0), _r.optionalTensor(1)));
    }
    case 1: {
      // aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor

      auto dispatch_clip = [](const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clip(min, max);
      };
      return wrap(dispatch_clip(self, _r.scalarOptional(0), _r.scalarOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// clip_
static PyObject * THPVariable_clip_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clip_(Tensor? min=None, Tensor? max=None)",
    "clip_(Scalar? min=None, Scalar? max=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)

      auto dispatch_clip_ = [](const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clip_(min, max);
      };
      return wrap(dispatch_clip_(self, _r.optionalTensor(0), _r.optionalTensor(1)));
    }
    case 1: {
      // aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)

      auto dispatch_clip_ = [](const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.clip_(min, max);
      };
      return wrap(dispatch_clip_(self, _r.scalarOptional(0), _r.scalarOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

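// NOTE: clip/clip_ are aliases of clamp/clamp_; both accept either Scalar or
// Tensor bounds, and at least one of min/max must be given at runtime.
// Illustrative sketch:
//
//   at::Tensor t = at::randn({4});
//   at::Tensor a = t.clamp(-1.0, 1.0);   // Scalar bounds
//   at::Tensor b = t.clip(-1.0, 1.0);    // same result via the alias
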
// clone
static PyObject * THPVariable_clone(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "clone(*, MemoryFormat? memory_format=None)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor

  auto dispatch_clone = [](const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.clone(memory_format);
  };
  return wrap(dispatch_clone(self, _r.memoryformatOptional(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

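// NOTE: clone makes a copy that still participates in autograd (gradients
// flow back to the source tensor); memory_format controls the layout of the
// copy. Illustrative sketch:
//
//   at::Tensor img = at::randn({1, 3, 8, 8});
//   at::Tensor nhwc = img.clone(at::MemoryFormat::ChannelsLast);
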
// coalesce
static PyObject * THPVariable_coalesce(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "coalesce");
  }
  // aten::coalesce(Tensor(a) self) -> Tensor(a)

  auto dispatch_coalesce = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.coalesce();
  };
  return wrap(dispatch_coalesce(self));
  END_HANDLE_TH_ERRORS
}

// col_indices
static PyObject * THPVariable_col_indices(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "col_indices");
  }
  // aten::col_indices(Tensor(a) self) -> Tensor(a)

  auto dispatch_col_indices = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.col_indices();
  };
  return wrap(dispatch_col_indices(self));
  END_HANDLE_TH_ERRORS
}

// conj
static PyObject * THPVariable_conj(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "conj");
  }
  // aten::conj(Tensor(a) self) -> Tensor(a)

  auto dispatch_conj = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.conj();
  };
  return wrap(dispatch_conj(self));
  END_HANDLE_TH_ERRORS
}

// conj_physical
static PyObject * THPVariable_conj_physical(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "conj_physical");
  }
  // aten::conj_physical(Tensor self) -> Tensor

  auto dispatch_conj_physical = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.conj_physical();
  };
  return wrap(dispatch_conj_physical(self));
  END_HANDLE_TH_ERRORS
}

// conj_physical_
static PyObject * THPVariable_conj_physical_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "conj_physical_");
  }
  // aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_conj_physical_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.conj_physical_();
  };
  return wrap(dispatch_conj_physical_(self));
  END_HANDLE_TH_ERRORS
}

// copysign
static PyObject * THPVariable_copysign(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "copysign(Tensor other)",
    "copysign(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_copysign = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.copysign(other);
      };
      return wrap(dispatch_copysign(self, _r.tensor(0)));
    }
    case 1: {
      // aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_copysign = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.copysign(other);
      };
      return wrap(dispatch_copysign(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// copysign_
static PyObject * THPVariable_copysign_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "copysign_(Tensor other)",
    "copysign_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_copysign_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.copysign_(other);
      };
      return wrap(dispatch_copysign_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_copysign_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.copysign_(other);
      };
      return wrap(dispatch_copysign_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

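// NOTE: copysign returns |self| with the sign taken elementwise from `other`
// (Tensor or Scalar overload). Illustrative sketch:
//
//   at::Tensor m = at::arange(1, 4).to(at::kFloat);   // {1, 2, 3}
//   at::Tensor s = at::full({3}, -1.0);
//   at::Tensor r = m.copysign(s);                     // {-1, -2, -3}
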
// corrcoef
static PyObject * THPVariable_corrcoef(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "corrcoef");
  }
  // aten::corrcoef(Tensor self) -> Tensor

  auto dispatch_corrcoef = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.corrcoef();
  };
  return wrap(dispatch_corrcoef(self));
  END_HANDLE_TH_ERRORS
}

// cos
static PyObject * THPVariable_cos(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "cos");
  }
  // aten::cos(Tensor self) -> Tensor

  auto dispatch_cos = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cos();
  };
  return wrap(dispatch_cos(self));
  END_HANDLE_TH_ERRORS
}

// cos_
static PyObject * THPVariable_cos_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "cos_");
  }
  // aten::cos_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_cos_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cos_();
  };
  return wrap(dispatch_cos_(self));
  END_HANDLE_TH_ERRORS
}

// cosh
static PyObject * THPVariable_cosh(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "cosh");
  }
  // aten::cosh(Tensor self) -> Tensor

  auto dispatch_cosh = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cosh();
  };
  return wrap(dispatch_cosh(self));
  END_HANDLE_TH_ERRORS
}

// cosh_
static PyObject * THPVariable_cosh_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "cosh_");
  }
  // aten::cosh_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_cosh_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cosh_();
  };
  return wrap(dispatch_cosh_(self));
  END_HANDLE_TH_ERRORS
}

// count_nonzero
static PyObject * THPVariable_count_nonzero(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "count_nonzero(int64_t? dim=None)",
    "count_nonzero(IntArrayRef dim)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::count_nonzero(Tensor self, int? dim=None) -> Tensor

      auto dispatch_count_nonzero = [](const at::Tensor & self, c10::optional<int64_t> dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.count_nonzero(dim);
      };
      return wrap(dispatch_count_nonzero(self, _r.toInt64Optional(0)));
    }
    case 1: {
      // aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor

      auto dispatch_count_nonzero = [](const at::Tensor & self, at::IntArrayRef dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.count_nonzero(dim);
      };
      return wrap(dispatch_count_nonzero(self, _r.intlist(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// cov
static PyObject * THPVariable_cov(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cov(*, int64_t correction=1, Tensor? fweights=None, Tensor? aweights=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor

  auto dispatch_cov = [](const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cov(correction, fweights, aweights);
  };
  return wrap(dispatch_cov(self, _r.toInt64(0), _r.optionalTensor(1), _r.optionalTensor(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

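// NOTE: cov treats rows of self as variables and columns as observations;
// `correction` is the Bessel correction (1 gives the sample covariance, 0 the
// population covariance), and fweights/aweights optionally weight the
// observations. Illustrative sketch:
//
//   at::Tensor obs = at::randn({3, 100});   // 3 variables, 100 observations
//   at::Tensor c = obs.cov();               // 3 x 3 covariance matrix
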
// cross
static PyObject * THPVariable_cross(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cross(Tensor other, int64_t? dim=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor

  auto dispatch_cross = [](const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.cross(other, dim);
  };
  return wrap(dispatch_cross(self, _r.tensor(0), _r.toInt64Optional(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// crow_indices
static PyObject * THPVariable_crow_indices(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "crow_indices");
  }
  // aten::crow_indices(Tensor(a) self) -> Tensor(a)

  auto dispatch_crow_indices = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.crow_indices();
  };
  return wrap(dispatch_crow_indices(self));
  END_HANDLE_TH_ERRORS
}

// cummax
static PyObject * THPVariable_cummax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("cummax");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cummax(int64_t dim)",
    "cummax(Dimname dim)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)

      auto dispatch_cummax = [](const at::Tensor & self, int64_t dim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.cummax(dim);
      };
      return wrap(NamedTuple, dispatch_cummax(self, _r.toInt64(0)));
    }
    case 1: {
      // aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)

      auto dispatch_cummax = [](const at::Tensor & self, at::Dimname dim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.cummax(dim);
      };
      return wrap(NamedTuple, dispatch_cummax(self, _r.dimname(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

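// NOTE: cummax/cummin return a (values, indices) pair; the binding wraps it
// in the cached structseq type from get_namedtuple so Python sees a named
// tuple (torch.return_types.cummax). Illustrative ATen sketch:
//
//   at::Tensor t = at::randn({5});
//   auto [values, indices] = t.cummax(0);   // running max and its argmax
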
// cummin
static PyObject * THPVariable_cummin(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("cummin");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cummin(int64_t dim)",
    "cummin(Dimname dim)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)

      auto dispatch_cummin = [](const at::Tensor & self, int64_t dim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.cummin(dim);
      };
      return wrap(NamedTuple, dispatch_cummin(self, _r.toInt64(0)));
    }
    case 1: {
      // aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)

      auto dispatch_cummin = [](const at::Tensor & self, at::Dimname dim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.cummin(dim);
      };
      return wrap(NamedTuple, dispatch_cummin(self, _r.dimname(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// cumprod
static PyObject * THPVariable_cumprod(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cumprod(int64_t dim, *, ScalarType? dtype=None)",
    "cumprod(Dimname dim, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_cumprod = [](const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.cumprod(dim, dtype);
      };
      return wrap(dispatch_cumprod(self, _r.toInt64(0), _r.scalartypeOptional(1)));
    }
    case 1: {
      // aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_cumprod = [](const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.cumprod(dim, dtype);
      };
      return wrap(dispatch_cumprod(self, _r.dimname(0), _r.scalartypeOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// cumprod_
static PyObject * THPVariable_cumprod_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cumprod_(int64_t dim, *, ScalarType? dtype=None)",
    "cumprod_(Dimname dim, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)

      auto dispatch_cumprod_ = [](const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.cumprod_(dim, dtype);
      };
      return wrap(dispatch_cumprod_(self, _r.toInt64(0), _r.scalartypeOptional(1)));
    }
    case 1: {
      // aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)

      auto dispatch_cumprod_ = [](const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.cumprod_(dim, dtype);
      };
      return wrap(dispatch_cumprod_(self, _r.dimname(0), _r.scalartypeOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// cumsum
static PyObject * THPVariable_cumsum(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cumsum(int64_t dim, *, ScalarType? dtype=None)",
    "cumsum(Dimname dim, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_cumsum = [](const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.cumsum(dim, dtype);
      };
      return wrap(dispatch_cumsum(self, _r.toInt64(0), _r.scalartypeOptional(1)));
    }
    case 1: {
      // aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_cumsum = [](const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.cumsum(dim, dtype);
      };
      return wrap(dispatch_cumsum(self, _r.dimname(0), _r.scalartypeOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// cumsum_
static PyObject * THPVariable_cumsum_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "cumsum_(int64_t dim, *, ScalarType? dtype=None)",
    "cumsum_(Dimname dim, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)

      auto dispatch_cumsum_ = [](const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.cumsum_(dim, dtype);
      };
      return wrap(dispatch_cumsum_(self, _r.toInt64(0), _r.scalartypeOptional(1)));
    }
    case 1: {
      // aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)

      auto dispatch_cumsum_ = [](const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.cumsum_(dim, dtype);
      };
      return wrap(dispatch_cumsum_(self, _r.dimname(0), _r.scalartypeOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

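// NOTE: for cumsum/cumprod the optional dtype upcasts the accumulation, which
// helps avoid overflow on narrow integer types. Illustrative sketch:
//
//   at::Tensor t = at::ones({4}, at::kByte) * 200;
//   at::Tensor s = t.cumsum(0, at::kLong);   // {200, 400, 600, 800} in int64
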
// deg2rad
static PyObject * THPVariable_deg2rad(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "deg2rad");
  }
  // aten::deg2rad(Tensor self) -> Tensor

  auto dispatch_deg2rad = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.deg2rad();
  };
  return wrap(dispatch_deg2rad(self));
  END_HANDLE_TH_ERRORS
}

// deg2rad_
static PyObject * THPVariable_deg2rad_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "deg2rad_");
  }
  // aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_deg2rad_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.deg2rad_();
  };
  return wrap(dispatch_deg2rad_(self));
  END_HANDLE_TH_ERRORS
}

// dense_dim
static PyObject * THPVariable_dense_dim(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "dense_dim");
  }
  // aten::dense_dim(Tensor self) -> int

  auto dispatch_dense_dim = [](const at::Tensor & self) -> int64_t {
    pybind11::gil_scoped_release no_gil;
    return self.dense_dim();
  };
  return wrap(dispatch_dense_dim(self));
  END_HANDLE_TH_ERRORS
}

// dequantize
static PyObject * THPVariable_dequantize(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "dequantize");
  }
  // aten::dequantize.self(Tensor self) -> Tensor

  auto dispatch_dequantize = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.dequantize();
  };
  return wrap(dispatch_dequantize(self));
  END_HANDLE_TH_ERRORS
}

// det
static PyObject * THPVariable_det(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "det");
  }
  // aten::det(Tensor self) -> Tensor

  auto dispatch_det = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.det();
  };
  return wrap(dispatch_det(self));
  END_HANDLE_TH_ERRORS
}

// detach
static PyObject * THPVariable_detach(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "detach");
  }
  // aten::detach(Tensor(a) self) -> Tensor(a)

  auto dispatch_detach = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.detach();
  };
  return wrap(dispatch_detach(self));
  END_HANDLE_TH_ERRORS
}

// detach_
static PyObject * THPVariable_detach_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "detach_");
  }
  // aten::detach_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_detach_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.detach_();
  };
  return wrap(dispatch_detach_(self));
  END_HANDLE_TH_ERRORS
}

// diag
static PyObject * THPVariable_diag(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "diag(int64_t diagonal=0)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::diag(Tensor self, int diagonal=0) -> Tensor

  auto dispatch_diag = [](const at::Tensor & self, int64_t diagonal) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.diag(diagonal);
  };
  return wrap(dispatch_diag(self, _r.toInt64(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// diag_embed
static PyObject * THPVariable_diag_embed(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor

  auto dispatch_diag_embed = [](const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.diag_embed(offset, dim1, dim2);
  };
  return wrap(dispatch_diag_embed(self, _r.toInt64(0), _r.toInt64(1), _r.toInt64(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// diagflat
static PyObject * THPVariable_diagflat(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "diagflat(int64_t offset=0)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::diagflat(Tensor self, int offset=0) -> Tensor

  auto dispatch_diagflat = [](const at::Tensor & self, int64_t offset) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.diagflat(offset);
  };
  return wrap(dispatch_diagflat(self, _r.toInt64(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// diagonal
static PyObject * THPVariable_diagonal(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "diagonal(*, Dimname outdim, Dimname dim1, Dimname dim2, int64_t offset=0)",
    "diagonal(int64_t offset=0, int64_t dim1=0, int64_t dim2=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)

      auto dispatch_diagonal = [](const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.diagonal(outdim, dim1, dim2, offset);
      };
      return wrap(dispatch_diagonal(self, _r.dimname(0), _r.dimname(1), _r.dimname(2), _r.toInt64(3)));
    }
    case 1: {
      // aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)

      auto dispatch_diagonal = [](const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.diagonal(offset, dim1, dim2);
      };
      return wrap(dispatch_diagonal(self, _r.toInt64(0), _r.toInt64(1), _r.toInt64(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// diagonal_scatter
static PyObject * THPVariable_diagonal_scatter(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "diagonal_scatter(Tensor src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor

  auto dispatch_diagonal_scatter = [](const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.diagonal_scatter(src, offset, dim1, dim2);
  };
  return wrap(dispatch_diagonal_scatter(self, _r.tensor(0), _r.toInt64(1), _r.toInt64(2), _r.toInt64(3)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

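// NOTE: diagonal_scatter is the out-of-place counterpart of writing through a
// diagonal view: it returns a copy of self with `src` embedded along the
// selected diagonal. Illustrative sketch:
//
//   at::Tensor base = at::zeros({3, 3});
//   at::Tensor d = at::ones({3});
//   at::Tensor out = base.diagonal_scatter(d);   // identity matrix
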
// diff
static PyObject * THPVariable_diff(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "diff(int64_t n=1, int64_t dim=-1, Tensor? prepend=None, Tensor? append=None)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor

  auto dispatch_diff = [](const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.diff(n, dim, prepend, append);
  };
  return wrap(dispatch_diff(self, _r.toInt64(0), _r.toInt64(1), _r.optionalTensor(2), _r.optionalTensor(3)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

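// Editor's note (illustrative): n applies the first difference recursively
// along dim; prepend/append are concatenated before differencing:
//   >>> t = torch.tensor([1, 3, 6])
//   >>> t.diff()                             # tensor([2, 3])
//   >>> t.diff(prepend=torch.tensor([0]))    # tensor([1, 2, 3])
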
// digamma
static PyObject * THPVariable_digamma(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "digamma");
  }
  // aten::digamma(Tensor self) -> Tensor

  auto dispatch_digamma = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.digamma();
  };
  return wrap(dispatch_digamma(self));
  END_HANDLE_TH_ERRORS
}

// digamma_
static PyObject * THPVariable_digamma_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "digamma_");
  }
  // aten::digamma_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_digamma_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.digamma_();
  };
  return wrap(dispatch_digamma_(self));
  END_HANDLE_TH_ERRORS
}

// dist
static PyObject * THPVariable_dist(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "dist(Tensor other, Scalar p=2)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor

  auto dispatch_dist = [](const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.dist(other, p);
  };
  return wrap(dispatch_dist(self, _r.tensor(0), _r.scalar(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// div
static PyObject * THPVariable_div(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "div(Tensor other)",
    "div(Tensor other, *, c10::string_view? rounding_mode)",
    "div(Scalar other, *, c10::string_view? rounding_mode)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::div.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_div = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.div(other);
      };
      return wrap(dispatch_div(self, _r.tensor(0)));
    }
    case 1: {
      // aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor

      auto dispatch_div = [](const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.div(other, rounding_mode);
      };
      return wrap(dispatch_div(self, _r.tensor(0), _r.stringViewOptional(1)));
    }
    case 2: {
      // aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor

      auto dispatch_div = [](const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.div(other, rounding_mode);
      };
      return wrap(dispatch_div(self, _r.scalar(0), _r.stringViewOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

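// Editor's note (illustrative): rounding_mode=None performs true division,
// "trunc" rounds toward zero, and "floor" rounds toward -inf (Python's //):
//   >>> t = torch.tensor([-7.])
//   >>> t.div(2)                             # tensor([-3.5000])
//   >>> t.div(2, rounding_mode='trunc')      # tensor([-3.])
//   >>> t.div(2, rounding_mode='floor')      # tensor([-4.])
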
// div_
static PyObject * THPVariable_div_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "div_(Tensor other)",
    "div_(Tensor other, *, c10::string_view? rounding_mode)",
    "div_(Scalar other, *, c10::string_view? rounding_mode)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_div_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.div_(other);
      };
      return wrap(dispatch_div_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)

      auto dispatch_div_ = [](const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.div_(other, rounding_mode);
      };
      return wrap(dispatch_div_(self, _r.tensor(0), _r.stringViewOptional(1)));
    }
    case 2: {
      // aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)

      auto dispatch_div_ = [](const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.div_(other, rounding_mode);
      };
      return wrap(dispatch_div_(self, _r.scalar(0), _r.stringViewOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// divide
static PyObject * THPVariable_divide(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "divide(Tensor other)",
    "divide(Tensor other, *, c10::string_view? rounding_mode)",
    "divide(Scalar other)",
    "divide(Scalar other, *, c10::string_view? rounding_mode)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::divide.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_divide = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.divide(other);
      };
      return wrap(dispatch_divide(self, _r.tensor(0)));
    }
    case 1: {
      // aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor

      auto dispatch_divide = [](const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.divide(other, rounding_mode);
      };
      return wrap(dispatch_divide(self, _r.tensor(0), _r.stringViewOptional(1)));
    }
    case 2: {
      // aten::divide.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_divide = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.divide(other);
      };
      return wrap(dispatch_divide(self, _r.scalar(0)));
    }
    case 3: {
      // aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor

      auto dispatch_divide = [](const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.divide(other, rounding_mode);
      };
      return wrap(dispatch_divide(self, _r.scalar(0), _r.stringViewOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// divide_
static PyObject * THPVariable_divide_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "divide_(Tensor other)",
    "divide_(Tensor other, *, c10::string_view? rounding_mode)",
    "divide_(Scalar other)",
    "divide_(Scalar other, *, c10::string_view? rounding_mode)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_divide_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.divide_(other);
      };
      return wrap(dispatch_divide_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)

      auto dispatch_divide_ = [](const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.divide_(other, rounding_mode);
      };
      return wrap(dispatch_divide_(self, _r.tensor(0), _r.stringViewOptional(1)));
    }
    case 2: {
      // aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_divide_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.divide_(other);
      };
      return wrap(dispatch_divide_(self, _r.scalar(0)));
    }
    case 3: {
      // aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)

      auto dispatch_divide_ = [](const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.divide_(other, rounding_mode);
      };
      return wrap(dispatch_divide_(self, _r.scalar(0), _r.stringViewOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// dot
static PyObject * THPVariable_dot(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "dot(Tensor tensor)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::dot(Tensor self, Tensor tensor) -> Tensor

  auto dispatch_dot = [](const at::Tensor & self, const at::Tensor & tensor) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.dot(tensor);
  };
  return wrap(dispatch_dot(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// dsplit
static PyObject * THPVariable_dsplit(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "dsplit(int64_t sections)",
    "dsplit(IntArrayRef indices)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]

      auto dispatch_dsplit = [](const at::Tensor & self, int64_t sections) -> ::std::vector<at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.dsplit(sections);
      };
      return wrap(dispatch_dsplit(self, _r.toInt64(0)));
    }
    case 1: {
      // aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]

      auto dispatch_dsplit = [](const at::Tensor & self, at::IntArrayRef indices) -> ::std::vector<at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.dsplit(indices);
      };
      return wrap(dispatch_dsplit(self, _r.intlist(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

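// Editor's note (illustrative): dsplit requires at least 3 dimensions and
// always splits along dim 2; the int overload must divide that dim evenly,
// while the IntArrayRef overload takes explicit split points:
//   >>> t = torch.arange(8.).reshape(1, 2, 4)
//   >>> t.dsplit(2)        # two views of shape (1, 2, 2)
//   >>> t.dsplit([1, 3])   # views of widths 1, 2, 1 along dim 2
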
// eq
static PyObject * THPVariable_eq(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "eq(Tensor other)",
    "eq(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::eq.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_eq = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.eq(other);
      };
      return wrap(dispatch_eq(self, _r.tensor(0)));
    }
    case 1: {
      // aten::eq.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_eq = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.eq(other);
      };
      return wrap(dispatch_eq(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// eq_
static PyObject * THPVariable_eq_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "eq_(Tensor other)",
    "eq_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_eq_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.eq_(other);
      };
      return wrap(dispatch_eq_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_eq_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.eq_(other);
      };
      return wrap(dispatch_eq_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// equal
static PyObject * THPVariable_equal(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "equal(Tensor other)",
  }, /*traceable=*/false);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::equal(Tensor self, Tensor other) -> bool

  auto dispatch_equal = [](const at::Tensor & self, const at::Tensor & other) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.equal(other);
  };
  return wrap(dispatch_equal(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

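// Editor's note: unlike eq (elementwise), equal reduces to a single Python
// bool — hence the bool-returning lambda and, presumably, the
// /*traceable=*/false above, since a bool result cannot be traced as a
// Tensor. Illustrative:
//   >>> torch.tensor([1, 2]).equal(torch.tensor([1, 2]))   # True
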
// erf
static PyObject * THPVariable_erf(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "erf");
  }
  // aten::erf(Tensor self) -> Tensor

  auto dispatch_erf = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.erf();
  };
  return wrap(dispatch_erf(self));
  END_HANDLE_TH_ERRORS
}

// erf_
static PyObject * THPVariable_erf_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "erf_");
  }
  // aten::erf_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_erf_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.erf_();
  };
  return wrap(dispatch_erf_(self));
  END_HANDLE_TH_ERRORS
}

// erfc
static PyObject * THPVariable_erfc(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "erfc");
  }
  // aten::erfc(Tensor self) -> Tensor

  auto dispatch_erfc = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.erfc();
  };
  return wrap(dispatch_erfc(self));
  END_HANDLE_TH_ERRORS
}

// erfc_
static PyObject * THPVariable_erfc_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "erfc_");
  }
  // aten::erfc_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_erfc_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.erfc_();
  };
  return wrap(dispatch_erfc_(self));
  END_HANDLE_TH_ERRORS
}

// erfinv
static PyObject * THPVariable_erfinv(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "erfinv");
  }
  // aten::erfinv(Tensor self) -> Tensor

  auto dispatch_erfinv = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.erfinv();
  };
  return wrap(dispatch_erfinv(self));
  END_HANDLE_TH_ERRORS
}

// erfinv_
static PyObject * THPVariable_erfinv_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "erfinv_");
  }
  // aten::erfinv_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_erfinv_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.erfinv_();
  };
  return wrap(dispatch_erfinv_(self));
  END_HANDLE_TH_ERRORS
}

// exp
static PyObject * THPVariable_exp(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "exp");
  }
  // aten::exp(Tensor self) -> Tensor

  auto dispatch_exp = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.exp();
  };
  return wrap(dispatch_exp(self));
  END_HANDLE_TH_ERRORS
}

// exp2
static PyObject * THPVariable_exp2(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "exp2");
  }
  // aten::exp2(Tensor self) -> Tensor

  auto dispatch_exp2 = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.exp2();
  };
  return wrap(dispatch_exp2(self));
  END_HANDLE_TH_ERRORS
}

// exp2_
static PyObject * THPVariable_exp2_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "exp2_");
  }
  // aten::exp2_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_exp2_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.exp2_();
  };
  return wrap(dispatch_exp2_(self));
  END_HANDLE_TH_ERRORS
}

// exp_
static PyObject * THPVariable_exp_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "exp_");
  }
  // aten::exp_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_exp_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.exp_();
  };
  return wrap(dispatch_exp_(self));
  END_HANDLE_TH_ERRORS
}

// expand
static PyObject * THPVariable_expand(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "expand(SymIntArrayRef size, *, bool implicit=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)

  auto dispatch_expand = [](const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.expand_symint(size, implicit);
  };
  return wrap(dispatch_expand(self, _r.symintlist(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

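// Editor's note (illustrative): the binding parses size as SymIntArrayRef
// and dispatches to expand_symint, so symbolic shapes survive tracing; -1
// keeps an existing dimension, and the result is a view (no copy):
//   >>> v = torch.randn(3, 1)
//   >>> v.expand(-1, 4).shape    # torch.Size([3, 4])
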
// expand_as
static PyObject * THPVariable_expand_as(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "expand_as(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)

  auto dispatch_expand_as = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.expand_as(other);
  };
  return wrap(dispatch_expand_as(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// expm1
static PyObject * THPVariable_expm1(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "expm1");
  }
  // aten::expm1(Tensor self) -> Tensor

  auto dispatch_expm1 = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.expm1();
  };
  return wrap(dispatch_expm1(self));
  END_HANDLE_TH_ERRORS
}

// expm1_
static PyObject * THPVariable_expm1_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "expm1_");
  }
  // aten::expm1_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_expm1_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.expm1_();
  };
  return wrap(dispatch_expm1_(self));
  END_HANDLE_TH_ERRORS
}

// exponential_
static PyObject * THPVariable_exponential_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "exponential_(double lambd=1, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)

  auto dispatch_exponential_ = [](const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.exponential_(lambd, generator);
  };
  return wrap(dispatch_exponential_(self, _r.toDouble(0), _r.generator(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

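// Editor's note (illustrative): like the other in-place samplers, the
// optional Generator makes draws reproducible:
//   >>> g = torch.Generator().manual_seed(0)
//   >>> torch.empty(3).exponential_(lambd=1.0, generator=g)
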
// fill_
static PyObject * THPVariable_fill_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "fill_(Tensor value)",
    "fill_(Scalar value)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)

      auto dispatch_fill_ = [](const at::Tensor & self, const at::Tensor & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.fill_(value);
      };
      return wrap(dispatch_fill_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)

      auto dispatch_fill_ = [](const at::Tensor & self, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.fill_(value);
      };
      return wrap(dispatch_fill_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// fill_diagonal_
static PyObject * THPVariable_fill_diagonal_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "fill_diagonal_(Scalar fill_value, bool wrap=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)

  auto dispatch_fill_diagonal_ = [](const at::Tensor & self, const at::Scalar & fill_value, bool wrap) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.fill_diagonal_(fill_value, wrap);
  };
  return wrap(dispatch_fill_diagonal_(self, _r.scalar(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// fix
static PyObject * THPVariable_fix(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "fix");
  }
  // aten::fix(Tensor self) -> Tensor

  auto dispatch_fix = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.fix();
  };
  return wrap(dispatch_fix(self));
  END_HANDLE_TH_ERRORS
}

// fix_
static PyObject * THPVariable_fix_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "fix_");
  }
  // aten::fix_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_fix_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.fix_();
  };
  return wrap(dispatch_fix_(self));
  END_HANDLE_TH_ERRORS
}

// flatten
static PyObject * THPVariable_flatten(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "flatten(int64_t start_dim, int64_t end_dim, Dimname out_dim)",
    "flatten(int64_t start_dim=0, int64_t end_dim=-1)",
    "flatten(Dimname start_dim, Dimname end_dim, Dimname out_dim)",
    "flatten(DimnameList dims, Dimname out_dim)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)

      auto dispatch_flatten = [](const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.flatten(start_dim, end_dim, out_dim);
      };
      return wrap(dispatch_flatten(self, _r.toInt64(0), _r.toInt64(1), _r.dimname(2)));
    }
    case 1: {
      // aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)

      auto dispatch_flatten = [](const at::Tensor & self, int64_t start_dim, int64_t end_dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.flatten(start_dim, end_dim);
      };
      return wrap(dispatch_flatten(self, _r.toInt64(0), _r.toInt64(1)));
    }
    case 2: {
      // aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)

      auto dispatch_flatten = [](const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.flatten(start_dim, end_dim, out_dim);
      };
      return wrap(dispatch_flatten(self, _r.dimname(0), _r.dimname(1), _r.dimname(2)));
    }
    case 3: {
      // aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)

      auto dispatch_flatten = [](const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.flatten(dims, out_dim);
      };
      return wrap(dispatch_flatten(self, _r.dimnamelist(0), _r.dimname(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

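// Editor's note (illustrative): the four overloads above cover positional
// dims, named dims, and a DimnameList that collapses several named dims into
// one new named dim. The common case is signature 1:
//   >>> x = torch.randn(2, 3, 4)
//   >>> x.flatten(1).shape    # torch.Size([2, 12])
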
// flip
static PyObject * THPVariable_flip(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "flip(IntArrayRef dims)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::flip(Tensor self, int[] dims) -> Tensor

  auto dispatch_flip = [](const at::Tensor & self, at::IntArrayRef dims) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.flip(dims);
  };
  return wrap(dispatch_flip(self, _r.intlist(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// fliplr
static PyObject * THPVariable_fliplr(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "fliplr");
  }
  // aten::fliplr(Tensor self) -> Tensor

  auto dispatch_fliplr = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.fliplr();
  };
  return wrap(dispatch_fliplr(self));
  END_HANDLE_TH_ERRORS
}

// flipud
static PyObject * THPVariable_flipud(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "flipud");
  }
  // aten::flipud(Tensor self) -> Tensor

  auto dispatch_flipud = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.flipud();
  };
  return wrap(dispatch_flipud(self));
  END_HANDLE_TH_ERRORS
}

// float_power
static PyObject * THPVariable_float_power(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "float_power(Tensor exponent)",
    "float_power(Scalar exponent)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor

      auto dispatch_float_power = [](const at::Tensor & self, const at::Tensor & exponent) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.float_power(exponent);
      };
      return wrap(dispatch_float_power(self, _r.tensor(0)));
    }
    case 1: {
      // aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor

      auto dispatch_float_power = [](const at::Tensor & self, const at::Scalar & exponent) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.float_power(exponent);
      };
      return wrap(dispatch_float_power(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

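// Editor's note (illustrative): float_power always computes in double (or
// complex double) precision regardless of input dtype, unlike pow:
//   >>> torch.tensor([2]).float_power(3)   # tensor([8.], dtype=torch.float64)
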
// float_power_
static PyObject * THPVariable_float_power_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "float_power_(Tensor exponent)",
    "float_power_(Scalar exponent)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)

      auto dispatch_float_power_ = [](const at::Tensor & self, const at::Tensor & exponent) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.float_power_(exponent);
      };
      return wrap(dispatch_float_power_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)

      auto dispatch_float_power_ = [](const at::Tensor & self, const at::Scalar & exponent) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.float_power_(exponent);
      };
      return wrap(dispatch_float_power_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// floor
static PyObject * THPVariable_floor(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "floor");
  }
  // aten::floor(Tensor self) -> Tensor

  auto dispatch_floor = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.floor();
  };
  return wrap(dispatch_floor(self));
  END_HANDLE_TH_ERRORS
}

// floor_
static PyObject * THPVariable_floor_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "floor_");
  }
  // aten::floor_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_floor_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.floor_();
  };
  return wrap(dispatch_floor_(self));
  END_HANDLE_TH_ERRORS
}

// floor_divide
static PyObject * THPVariable_floor_divide(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "floor_divide(Tensor other)",
    "floor_divide(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::floor_divide(Tensor self, Tensor other) -> Tensor

      auto dispatch_floor_divide = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.floor_divide(other);
      };
      return wrap(dispatch_floor_divide(self, _r.tensor(0)));
    }
    case 1: {
      // aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_floor_divide = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.floor_divide(other);
      };
      return wrap(dispatch_floor_divide(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// floor_divide_
static PyObject * THPVariable_floor_divide_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "floor_divide_(Tensor other)",
    "floor_divide_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_floor_divide_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.floor_divide_(other);
      };
      return wrap(dispatch_floor_divide_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_floor_divide_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.floor_divide_(other);
      };
      return wrap(dispatch_floor_divide_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// fmax
static PyObject * THPVariable_fmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "fmax(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::fmax(Tensor self, Tensor other) -> Tensor

  auto dispatch_fmax = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.fmax(other);
  };
  return wrap(dispatch_fmax(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// fmin
static PyObject * THPVariable_fmin(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "fmin(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::fmin(Tensor self, Tensor other) -> Tensor

  auto dispatch_fmin = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.fmin(other);
  };
  return wrap(dispatch_fmin(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// fmod
static PyObject * THPVariable_fmod(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "fmod(Tensor other)",
    "fmod(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_fmod = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.fmod(other);
      };
      return wrap(dispatch_fmod(self, _r.tensor(0)));
    }
    case 1: {
      // aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_fmod = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.fmod(other);
      };
      return wrap(dispatch_fmod(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// fmod_
static PyObject * THPVariable_fmod_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "fmod_(Tensor other)",
    "fmod_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_fmod_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.fmod_(other);
      };
      return wrap(dispatch_fmod_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_fmod_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.fmod_(other);
      };
      return wrap(dispatch_fmod_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// frac
static PyObject * THPVariable_frac(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "frac");
  }
  // aten::frac(Tensor self) -> Tensor

  auto dispatch_frac = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.frac();
  };
  return wrap(dispatch_frac(self));
  END_HANDLE_TH_ERRORS
}

// frac_
static PyObject * THPVariable_frac_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "frac_");
  }
  // aten::frac_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_frac_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.frac_();
  };
  return wrap(dispatch_frac_(self));
  END_HANDLE_TH_ERRORS
}

// frexp
static PyObject * THPVariable_frexp(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("frexp");
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "frexp");
  }
  // aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)

  auto dispatch_frexp = [](const at::Tensor & self) -> ::std::tuple<at::Tensor,at::Tensor> {
    pybind11::gil_scoped_release no_gil;
    return self.frexp();
  };
  return wrap(NamedTuple, dispatch_frexp(self));
  END_HANDLE_TH_ERRORS
}

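// Editor's note (illustrative): frexp is one of the structseq-returning
// bindings — wrap() packs the (mantissa, exponent) pair into the named tuple
// type fetched via get_namedtuple("frexp") above:
//   >>> m, e = torch.tensor([8.]).frexp()
//   >>> m * 2**e    # reconstructs the input: tensor([8.])
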
// gather
static PyObject * THPVariable_gather(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "gather(int64_t dim, Tensor index, *, bool sparse_grad=False)",
    "gather(Dimname dim, Tensor index, *, bool sparse_grad=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor

      auto dispatch_gather = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.gather(dim, index, sparse_grad);
      };
      return wrap(dispatch_gather(self, _r.toInt64(0), _r.tensor(1), _r.toBool(2)));
    }
    case 1: {
      // aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor

      auto dispatch_gather = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.gather(dim, index, sparse_grad);
      };
      return wrap(dispatch_gather(self, _r.dimname(0), _r.tensor(1), _r.toBool(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

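// Editor's note (illustrative): index must have the same number of
// dimensions as self; sparse_grad=True requests a sparse gradient for self
// in the backward pass:
//   >>> t = torch.tensor([[1, 2], [3, 4]])
//   >>> t.gather(1, torch.tensor([[0, 0], [1, 0]]))   # tensor([[1, 1], [4, 3]])
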
// gcd
static PyObject * THPVariable_gcd(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "gcd(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::gcd(Tensor self, Tensor other) -> Tensor

  auto dispatch_gcd = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.gcd(other);
  };
  return wrap(dispatch_gcd(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// gcd_
static PyObject * THPVariable_gcd_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "gcd_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_gcd_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.gcd_(other);
  };
  return wrap(dispatch_gcd_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// ge
static PyObject * THPVariable_ge(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "ge(Tensor other)",
    "ge(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::ge.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_ge = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.ge(other);
      };
      return wrap(dispatch_ge(self, _r.tensor(0)));
    }
    case 1: {
      // aten::ge.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_ge = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.ge(other);
      };
      return wrap(dispatch_ge(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// ge_
static PyObject * THPVariable_ge_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "ge_(Tensor other)",
    "ge_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_ge_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.ge_(other);
      };
      return wrap(dispatch_ge_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_ge_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.ge_(other);
      };
      return wrap(dispatch_ge_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// geometric_
static PyObject * THPVariable_geometric_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "geometric_(double p, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)

  auto dispatch_geometric_ = [](const at::Tensor & self, double p, c10::optional<at::Generator> generator) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.geometric_(p, generator);
  };
  return wrap(dispatch_geometric_(self, _r.toDouble(0), _r.generator(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// geqrf
static PyObject * THPVariable_geqrf(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("geqrf");
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "geqrf");
  }
  // aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)

  auto dispatch_geqrf = [](const at::Tensor & self) -> ::std::tuple<at::Tensor,at::Tensor> {
    pybind11::gil_scoped_release no_gil;
    return self.geqrf();
  };
  return wrap(NamedTuple, dispatch_geqrf(self));
  END_HANDLE_TH_ERRORS
}

// ger
static PyObject * THPVariable_ger(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "ger(Tensor vec2)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::ger(Tensor self, Tensor vec2) -> Tensor

  auto dispatch_ger = [](const at::Tensor & self, const at::Tensor & vec2) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.ger(vec2);
  };
  return wrap(dispatch_ger(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// greater
static PyObject * THPVariable_greater(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "greater(Tensor other)",
    "greater(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::greater.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_greater = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.greater(other);
      };
      return wrap(dispatch_greater(self, _r.tensor(0)));
    }
    case 1: {
      // aten::greater.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_greater = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.greater(other);
      };
      return wrap(dispatch_greater(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// greater_
static PyObject * THPVariable_greater_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "greater_(Tensor other)",
    "greater_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_greater_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.greater_(other);
      };
      return wrap(dispatch_greater_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_greater_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.greater_(other);
      };
      return wrap(dispatch_greater_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// greater_equal
static PyObject * THPVariable_greater_equal(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "greater_equal(Tensor other)",
    "greater_equal(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_greater_equal = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.greater_equal(other);
      };
      return wrap(dispatch_greater_equal(self, _r.tensor(0)));
    }
    case 1: {
      // aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_greater_equal = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.greater_equal(other);
      };
      return wrap(dispatch_greater_equal(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// greater_equal_
static PyObject * THPVariable_greater_equal_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "greater_equal_(Tensor other)",
    "greater_equal_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_greater_equal_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.greater_equal_(other);
      };
      return wrap(dispatch_greater_equal_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_greater_equal_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.greater_equal_(other);
      };
      return wrap(dispatch_greater_equal_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// gt
static PyObject * THPVariable_gt(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "gt(Tensor other)",
    "gt(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::gt.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_gt = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.gt(other);
      };
      return wrap(dispatch_gt(self, _r.tensor(0)));
    }
    case 1: {
      // aten::gt.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_gt = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.gt(other);
      };
      return wrap(dispatch_gt(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
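
// Usage sketch (illustrative only, not part of the generated code; both
// overloads are resolved by the parser above):
//   at::Tensor x = at::randn({3});
//   at::Tensor m1 = x.gt(0.5);          // Scalar overload -> boolean mask
//   at::Tensor m2 = x.gt(x.flip({0}));  // Tensor overload, broadcasts like the other comparisons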
// gt_
static PyObject * THPVariable_gt_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "gt_(Tensor other)",
    "gt_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_gt_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.gt_(other);
      };
      return wrap(dispatch_gt_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_gt_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.gt_(other);
      };
      return wrap(dispatch_gt_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// hardshrink
static PyObject * THPVariable_hardshrink(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "hardshrink(Scalar lambd=0.5)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor

  auto dispatch_hardshrink = [](const at::Tensor & self, const at::Scalar & lambd) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.hardshrink(lambd);
  };
  return wrap(dispatch_hardshrink(self, _r.scalar(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
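
// Usage sketch (illustrative only, not part of the generated code): hardshrink
// keeps x where |x| > lambd and zeroes the rest, so with the default lambd=0.5:
//   at::Tensor y = at::randn({5}).hardshrink();  // y[i] == 0 wherever |x[i]| <= 0.5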

// heaviside
static PyObject * THPVariable_heaviside(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "heaviside(Tensor values)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::heaviside(Tensor self, Tensor values) -> Tensor

  auto dispatch_heaviside = [](const at::Tensor & self, const at::Tensor & values) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.heaviside(values);
  };
  return wrap(dispatch_heaviside(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// heaviside_
static PyObject * THPVariable_heaviside_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "heaviside_(Tensor values)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)

  auto dispatch_heaviside_ = [](const at::Tensor & self, const at::Tensor & values) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.heaviside_(values);
  };
  return wrap(dispatch_heaviside_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// histc
static PyObject * THPVariable_histc(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "histc(int64_t bins=100, Scalar min=0, Scalar max=0)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor

  auto dispatch_histc = [](const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.histc(bins, min, max);
  };
  return wrap(dispatch_histc(self, _r.toInt64(0), _r.scalar(1), _r.scalar(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
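
// Usage sketch (illustrative only, not part of the generated code): when
// min == max == 0 (the defaults), the range is taken from the data's own
// minimum and maximum:
//   at::Tensor h = at::rand({100}).histc(/*bins=*/10, /*min=*/0, /*max=*/1);  // counts per bin over [0, 1]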
// histogram
static PyObject * THPVariable_histogram(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("histogram");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "histogram(Tensor bins, *, Tensor? weight=None, bool density=False)",
    "histogram(int64_t bins=100, *, ArrayRef<double>? range=None, Tensor? weight=None, bool density=False)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)

      auto dispatch_histogram = [](const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.histogram(bins, weight, density);
      };
      return wrap(NamedTuple, dispatch_histogram(self, _r.tensor(0), _r.optionalTensor(1), _r.toBool(2)));
    }
    case 1: {
      // aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)

      auto dispatch_histogram = [](const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.histogram(bins, range, weight, density);
      };
      return wrap(NamedTuple, dispatch_histogram(self, _r.toInt64(0), _r.doublelistOptional(1), _r.optionalTensor(2), _r.toBool(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
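
// Usage sketch (illustrative only, not part of the generated code; assumes the
// defaulted arguments of the int-bins overload, second signature above):
//   auto [hist, bin_edges] = at::rand({100}).histogram(/*bins=*/10);
//   // hist holds 10 counts; bin_edges holds the 11 bin boundaries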
// hsplit
static PyObject * THPVariable_hsplit(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "hsplit(int64_t sections)",
    "hsplit(IntArrayRef indices)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]

      auto dispatch_hsplit = [](const at::Tensor & self, int64_t sections) -> ::std::vector<at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.hsplit(sections);
      };
      return wrap(dispatch_hsplit(self, _r.toInt64(0)));
    }
    case 1: {
      // aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]

      auto dispatch_hsplit = [](const at::Tensor & self, at::IntArrayRef indices) -> ::std::vector<at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.hsplit(indices);
      };
      return wrap(dispatch_hsplit(self, _r.intlist(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
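
// Usage sketch (illustrative only, not part of the generated code): hsplit
// splits along dim 1 for 2-D inputs (dim 0 for 1-D), matching numpy.hsplit:
//   at::Tensor x = at::arange(12).reshape({3, 4});
//   auto halves = x.hsplit(2);        // two (3, 2) views
//   auto pieces = x.hsplit({1, 3});   // split before columns 1 and 3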

// hypot
static PyObject * THPVariable_hypot(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "hypot(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::hypot(Tensor self, Tensor other) -> Tensor

  auto dispatch_hypot = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.hypot(other);
  };
  return wrap(dispatch_hypot(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// hypot_
static PyObject * THPVariable_hypot_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "hypot_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_hypot_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.hypot_(other);
  };
  return wrap(dispatch_hypot_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// i0
static PyObject * THPVariable_i0(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "i0");
  }
  // aten::i0(Tensor self) -> Tensor

  auto dispatch_i0 = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.i0();
  };
  return wrap(dispatch_i0(self));
  END_HANDLE_TH_ERRORS
}

// i0_
static PyObject * THPVariable_i0_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "i0_");
  }
  // aten::i0_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_i0_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.i0_();
  };
  return wrap(dispatch_i0_(self));
  END_HANDLE_TH_ERRORS
}

// igamma
static PyObject * THPVariable_igamma(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "igamma(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::igamma(Tensor self, Tensor other) -> Tensor

  auto dispatch_igamma = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.igamma(other);
  };
  return wrap(dispatch_igamma(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// igamma_
static PyObject * THPVariable_igamma_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "igamma_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_igamma_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.igamma_(other);
  };
  return wrap(dispatch_igamma_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// igammac
static PyObject * THPVariable_igammac(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "igammac(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::igammac(Tensor self, Tensor other) -> Tensor

  auto dispatch_igammac = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.igammac(other);
  };
  return wrap(dispatch_igammac(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// igammac_
static PyObject * THPVariable_igammac_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "igammac_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_igammac_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.igammac_(other);
  };
  return wrap(dispatch_igammac_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// index_add
static PyObject * THPVariable_index_add(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_add(int64_t dim, Tensor index, Tensor source, *, Scalar alpha=1)",
    "index_add(Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor

      auto dispatch_index_add = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_add(dim, index, source, alpha);
      };
      return wrap(dispatch_index_add(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2), _r.scalar(3)));
    }
    case 1: {
      // aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor

      auto dispatch_index_add = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_add(dim, index, source, alpha);
      };
      return wrap(dispatch_index_add(self, _r.dimname(0), _r.tensor(1), _r.tensor(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
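
// Usage sketch (illustrative only, not part of the generated code): accumulate
// slices of source into self at the given indices, scaled by alpha:
//   at::Tensor base = at::zeros({5, 3});
//   at::Tensor idx  = at::tensor({0, 4}, at::kLong);  // integer index tensor
//   at::Tensor src  = at::ones({2, 3});
//   at::Tensor out  = base.index_add(0, idx, src, /*alpha=*/2);  // rows 0 and 4 become 2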

// index_add_
static PyObject * THPVariable_index_add_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_add_(int64_t dim, Tensor index, Tensor source, *, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)

  auto dispatch_index_add_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.index_add_(dim, index, source, alpha);
  };
  return wrap(dispatch_index_add_(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2), _r.scalar(3)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// index_copy
static PyObject * THPVariable_index_copy(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_copy(int64_t dim, Tensor index, Tensor source)",
    "index_copy(Dimname dim, Tensor index, Tensor source)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor

      auto dispatch_index_copy = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_copy(dim, index, source);
      };
      return wrap(dispatch_index_copy(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor

      auto dispatch_index_copy = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_copy(dim, index, source);
      };
      return wrap(dispatch_index_copy(self, _r.dimname(0), _r.tensor(1), _r.tensor(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// index_copy_
static PyObject * THPVariable_index_copy_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_copy_(int64_t dim, Tensor index, Tensor source)",
    "index_copy_(Dimname dim, Tensor index, Tensor source)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)

      auto dispatch_index_copy_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_copy_(dim, index, source);
      };
      return wrap(dispatch_index_copy_(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)

      auto dispatch_index_copy_ = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_copy_(dim, index, source);
      };
      return wrap(dispatch_index_copy_(self, _r.dimname(0), _r.tensor(1), _r.tensor(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// index_fill
static PyObject * THPVariable_index_fill(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_fill(int64_t dim, Tensor index, Tensor value)",
    "index_fill(Dimname dim, Tensor index, Tensor value)",
    "index_fill(int64_t dim, Tensor index, Scalar value)",
    "index_fill(Dimname dim, Tensor index, Scalar value)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor

      auto dispatch_index_fill = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_fill(dim, index, value);
      };
      return wrap(dispatch_index_fill(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor

      auto dispatch_index_fill = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_fill(dim, index, value);
      };
      return wrap(dispatch_index_fill(self, _r.dimname(0), _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor

      auto dispatch_index_fill = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_fill(dim, index, value);
      };
      return wrap(dispatch_index_fill(self, _r.toInt64(0), _r.tensor(1), _r.scalar(2)));
    }
    case 3: {
      // aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor

      auto dispatch_index_fill = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_fill(dim, index, value);
      };
      return wrap(dispatch_index_fill(self, _r.dimname(0), _r.tensor(1), _r.scalar(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// index_fill_
static PyObject * THPVariable_index_fill_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_fill_(int64_t dim, Tensor index, Tensor value)",
    "index_fill_(Dimname dim, Tensor index, Tensor value)",
    "index_fill_(int64_t dim, Tensor index, Scalar value)",
    "index_fill_(Dimname dim, Tensor index, Scalar value)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)

      auto dispatch_index_fill_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_fill_(dim, index, value);
      };
      return wrap(dispatch_index_fill_(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)

      auto dispatch_index_fill_ = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_fill_(dim, index, value);
      };
      return wrap(dispatch_index_fill_(self, _r.dimname(0), _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)

      auto dispatch_index_fill_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_fill_(dim, index, value);
      };
      return wrap(dispatch_index_fill_(self, _r.toInt64(0), _r.tensor(1), _r.scalar(2)));
    }
    case 3: {
      // aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)

      auto dispatch_index_fill_ = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_fill_(dim, index, value);
      };
      return wrap(dispatch_index_fill_(self, _r.dimname(0), _r.tensor(1), _r.scalar(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// index_put
static PyObject * THPVariable_index_put(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_put(c10::List<c10::optional<Tensor>> indices, Tensor values, bool accumulate=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor

  auto dispatch_index_put = [](const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.index_put(indices, values, accumulate);
  };
  return wrap(dispatch_index_put(self, _r.list_of_optional_tensors(0), _r.tensor(1), _r.toBool(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
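
// Usage sketch (illustrative only, not part of the generated code): indices is
// a list of optional tensors, one (or nullopt) per dimension, mirroring
// x[idx] = values on the Python side:
//   c10::List<c10::optional<at::Tensor>> indices;
//   indices.push_back(c10::optional<at::Tensor>(at::tensor({0, 2}, at::kLong)));
//   at::Tensor out = at::zeros({4}).index_put(indices, at::tensor({1.f, 2.f}));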

// index_put_
static PyObject * THPVariable_index_put_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_put_(c10::List<c10::optional<Tensor>> indices, Tensor values, bool accumulate=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)

  auto dispatch_index_put_ = [](const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.index_put_(indices, values, accumulate);
  };
  return wrap(dispatch_index_put_(self, _r.list_of_optional_tensors(0), _r.tensor(1), _r.toBool(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// index_reduce
static PyObject * THPVariable_index_reduce(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_reduce(int64_t dim, Tensor index, Tensor source, c10::string_view reduce, *, bool include_self=True)",
  }, /*traceable=*/true);

  ParsedArgs<5> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor

  auto dispatch_index_reduce = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.index_reduce(dim, index, source, reduce, include_self);
  };
  return wrap(dispatch_index_reduce(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2), _r.stringView(3), _r.toBool(4)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
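
// Usage sketch (illustrative only, not part of the generated code): reduce
// accepts "prod", "mean", "amax", or "amin"; include_self controls whether
// self's existing values participate in the reduction. Reusing base/idx/src
// from the index_add sketch above:
//   at::Tensor out = base.index_reduce(0, idx, src, "amax", /*include_self=*/true);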

// index_reduce_
static PyObject * THPVariable_index_reduce_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_reduce_(int64_t dim, Tensor index, Tensor source, c10::string_view reduce, *, bool include_self=True)",
  }, /*traceable=*/true);

  ParsedArgs<5> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)

  auto dispatch_index_reduce_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.index_reduce_(dim, index, source, reduce, include_self);
  };
  return wrap(dispatch_index_reduce_(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2), _r.stringView(3), _r.toBool(4)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// index_select
static PyObject * THPVariable_index_select(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "index_select(int64_t dim, Tensor index)",
    "index_select(Dimname dim, Tensor index)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::index_select(Tensor self, int dim, Tensor index) -> Tensor

      auto dispatch_index_select = [](const at::Tensor & self, int64_t dim, const at::Tensor & index) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_select(dim, index);
      };
      return wrap(dispatch_index_select(self, _r.toInt64(0), _r.tensor(1)));
    }
    case 1: {
      // aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor

      auto dispatch_index_select = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.index_select(dim, index);
      };
      return wrap(dispatch_index_select(self, _r.dimname(0), _r.tensor(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
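
// Usage sketch (illustrative only, not part of the generated code): gathers
// whole slices along dim, in the order given by index:
//   at::Tensor x = at::arange(6).reshape({3, 2});
//   at::Tensor rows = x.index_select(0, at::tensor({2, 0}, at::kLong));  // rows 2 then 0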

// indices
static PyObject * THPVariable_indices(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "indices");
  }
  // aten::indices(Tensor(a) self) -> Tensor(a)

  auto dispatch_indices = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.indices();
  };
  return wrap(dispatch_indices(self));
  END_HANDLE_TH_ERRORS
}

// inner
static PyObject * THPVariable_inner(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "inner(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::inner(Tensor self, Tensor other) -> Tensor

  auto dispatch_inner = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.inner(other);
  };
  return wrap(dispatch_inner(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// int_repr
static PyObject * THPVariable_int_repr(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "int_repr");
  }
  // aten::int_repr(Tensor self) -> Tensor

  auto dispatch_int_repr = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.int_repr();
  };
  return wrap(dispatch_int_repr(self));
  END_HANDLE_TH_ERRORS
}

// inverse
static PyObject * THPVariable_inverse(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "inverse");
  }
  // aten::inverse(Tensor self) -> Tensor

  auto dispatch_inverse = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.inverse();
  };
  return wrap(dispatch_inverse(self));
  END_HANDLE_TH_ERRORS
}

// is_coalesced
static PyObject * THPVariable_is_coalesced(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_coalesced");
  }
  // aten::is_coalesced(Tensor self) -> bool

  auto dispatch_is_coalesced = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_coalesced();
  };
  return wrap(dispatch_is_coalesced(self));
  END_HANDLE_TH_ERRORS
}

// is_complex
static PyObject * THPVariable_is_complex(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_complex");
  }
  // aten::is_complex(Tensor self) -> bool

  auto dispatch_is_complex = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_complex();
  };
  return wrap(dispatch_is_complex(self));
  END_HANDLE_TH_ERRORS
}

// is_conj
static PyObject * THPVariable_is_conj(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_conj");
  }
  // aten::is_conj(Tensor self) -> bool

  auto dispatch_is_conj = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_conj();
  };
  return wrap(dispatch_is_conj(self));
  END_HANDLE_TH_ERRORS
}

// is_distributed
static PyObject * THPVariable_is_distributed(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_distributed");
  }
  // aten::is_distributed(Tensor self) -> bool

  auto dispatch_is_distributed = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_distributed();
  };
  return wrap(dispatch_is_distributed(self));
  END_HANDLE_TH_ERRORS
}

// is_floating_point
static PyObject * THPVariable_is_floating_point(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_floating_point");
  }
  // aten::is_floating_point(Tensor self) -> bool

  auto dispatch_is_floating_point = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_floating_point();
  };
  return wrap(dispatch_is_floating_point(self));
  END_HANDLE_TH_ERRORS
}

// is_inference
static PyObject * THPVariable_is_inference(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_inference");
  }
  // aten::is_inference(Tensor self) -> bool

  auto dispatch_is_inference = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_inference();
  };
  return wrap(dispatch_is_inference(self));
  END_HANDLE_TH_ERRORS
}

// is_neg
static PyObject * THPVariable_is_neg(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_neg");
  }
  // aten::is_neg(Tensor self) -> bool

  auto dispatch_is_neg = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_neg();
  };
  return wrap(dispatch_is_neg(self));
  END_HANDLE_TH_ERRORS
}

// is_nonzero
static PyObject * THPVariable_is_nonzero(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_nonzero");
  }
  // aten::is_nonzero(Tensor self) -> bool

  auto dispatch_is_nonzero = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_nonzero();
  };
  return wrap(dispatch_is_nonzero(self));
  END_HANDLE_TH_ERRORS
}

// is_pinned
static PyObject * THPVariable_is_pinned(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "is_pinned(Device? device=None)",
  }, /*traceable=*/false);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::is_pinned(Tensor self, Device? device=None) -> bool

  auto dispatch_is_pinned = [](const at::Tensor & self, c10::optional<at::Device> device) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_pinned(device);
  };
  return wrap(dispatch_is_pinned(self, _r.deviceOptional(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
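
// Usage sketch (illustrative only, not part of the generated code; assumes a
// CUDA-enabled build, since pinning requires an accelerator backend):
//   bool pinned = at::empty({16}).pin_memory().is_pinned();  // expected true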

// is_same_size
static PyObject * THPVariable_is_same_size(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "is_same_size(Tensor other)",
  }, /*traceable=*/false);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::is_same_size(Tensor self, Tensor other) -> bool

  auto dispatch_is_same_size = [](const at::Tensor & self, const at::Tensor & other) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_same_size(other);
  };
  return wrap(dispatch_is_same_size(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// is_set_to
static PyObject * THPVariable_is_set_to(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "is_set_to(Tensor tensor)",
  }, /*traceable=*/false);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::is_set_to(Tensor self, Tensor tensor) -> bool

  auto dispatch_is_set_to = [](const at::Tensor & self, const at::Tensor & tensor) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_set_to(tensor);
  };
  return wrap(dispatch_is_set_to(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// is_signed
static PyObject * THPVariable_is_signed(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "is_signed");
  }
  // aten::is_signed(Tensor self) -> bool

  auto dispatch_is_signed = [](const at::Tensor & self) -> bool {
    pybind11::gil_scoped_release no_gil;
    return self.is_signed();
  };
  return wrap(dispatch_is_signed(self));
  END_HANDLE_TH_ERRORS
}

// isclose
static PyObject * THPVariable_isclose(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "isclose(Tensor other, double rtol=1e-05, double atol=1e-08, bool equal_nan=False)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor

  auto dispatch_isclose = [](const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.isclose(other, rtol, atol, equal_nan);
  };
  return wrap(dispatch_isclose(self, _r.tensor(0), _r.toDouble(1), _r.toDouble(2), _r.toBool(3)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
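
// Usage sketch (illustrative only, not part of the generated code): for
// broadcastable tensors a and b, isclose tests elementwise
//   |a - b| <= atol + rtol * |b|   (NaNs compare equal only when equal_nan=true)
//   at::Tensor close = a.isclose(b, /*rtol=*/1e-5, /*atol=*/1e-8);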

// isfinite
static PyObject * THPVariable_isfinite(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "isfinite");
  }
  // aten::isfinite(Tensor self) -> Tensor

  auto dispatch_isfinite = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.isfinite();
  };
  return wrap(dispatch_isfinite(self));
  END_HANDLE_TH_ERRORS
}

// isinf
static PyObject * THPVariable_isinf(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "isinf");
  }
  // aten::isinf(Tensor self) -> Tensor

  auto dispatch_isinf = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.isinf();
  };
  return wrap(dispatch_isinf(self));
  END_HANDLE_TH_ERRORS
}

// isnan
static PyObject * THPVariable_isnan(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "isnan");
  }
  // aten::isnan(Tensor self) -> Tensor

  auto dispatch_isnan = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.isnan();
  };
  return wrap(dispatch_isnan(self));
  END_HANDLE_TH_ERRORS
}

// isneginf
static PyObject * THPVariable_isneginf(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "isneginf");
  }
  // aten::isneginf(Tensor self) -> Tensor

  auto dispatch_isneginf = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.isneginf();
  };
  return wrap(dispatch_isneginf(self));
  END_HANDLE_TH_ERRORS
}

// isposinf
static PyObject * THPVariable_isposinf(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "isposinf");
  }
  // aten::isposinf(Tensor self) -> Tensor

  auto dispatch_isposinf = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.isposinf();
  };
  return wrap(dispatch_isposinf(self));
  END_HANDLE_TH_ERRORS
}

// isreal
static PyObject * THPVariable_isreal(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "isreal");
  }
  // aten::isreal(Tensor self) -> Tensor

  auto dispatch_isreal = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.isreal();
  };
  return wrap(dispatch_isreal(self));
  END_HANDLE_TH_ERRORS
}

// istft
static PyObject * THPVariable_istft(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "istft(int64_t n_fft, int64_t? hop_length=None, int64_t? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int64_t? length=None, bool return_complex=False)",
  }, /*traceable=*/true);

  ParsedArgs<9> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor

  auto dispatch_istft = [](const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.istft(n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
  };
  return wrap(dispatch_istft(self, _r.toInt64(0), _r.toInt64Optional(1), _r.toInt64Optional(2), _r.optionalTensor(3), _r.toBool(4), _r.toBool(5), _r.toBoolOptional(6), _r.toInt64Optional(7), _r.toBool(8)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
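
// Usage sketch (illustrative only, not part of the generated code; assumes the
// defaulted optional arguments in the ATen header): istft inverts a complex
// STFT back to a waveform; pass the same n_fft/hop_length/window used forward:
//   at::Tensor wave = spec.istft(/*n_fft=*/400);  // spec: complex STFT of shape (freq, frames)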

// kron
static PyObject * THPVariable_kron(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "kron(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::kron(Tensor self, Tensor other) -> Tensor

  auto dispatch_kron = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.kron(other);
  };
  return wrap(dispatch_kron(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// kthvalue
static PyObject * THPVariable_kthvalue(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("kthvalue");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "kthvalue(int64_t k, int64_t dim=-1, bool keepdim=False)",
    "kthvalue(int64_t k, Dimname dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_kthvalue = [](const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.kthvalue(k, dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_kthvalue(self, _r.toInt64(0), _r.toInt64(1), _r.toBool(2)));
    }
    case 1: {
      // aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_kthvalue = [](const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.kthvalue(k, dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_kthvalue(self, _r.toInt64(0), _r.dimname(1), _r.toBool(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
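
// Usage sketch (illustrative only, not part of the generated code): returns the
// k-th *smallest* entry along dim together with its index; on the Python side
// the result is a (values, indices) namedtuple:
//   auto [values, indices] = x.kthvalue(/*k=*/2, /*dim=*/0);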

// lcm
static PyObject * THPVariable_lcm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "lcm(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::lcm(Tensor self, Tensor other) -> Tensor

  auto dispatch_lcm = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.lcm(other);
  };
  return wrap(dispatch_lcm(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// lcm_
static PyObject * THPVariable_lcm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "lcm_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_lcm_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.lcm_(other);
  };
  return wrap(dispatch_lcm_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
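
// NOTE (explanatory comment, not produced by the generator): lcm computes
// the element-wise least common multiple of two integer tensors; lcm_ is
// the in-place variant that writes into self. For example:
//   >>> torch.tensor([4, 6]).lcm(torch.tensor([6, 4]))
//   tensor([12, 12])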

// ldexp
static PyObject * THPVariable_ldexp(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "ldexp(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor

  auto dispatch_ldexp = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.ldexp(other);
  };
  return wrap(dispatch_ldexp(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// ldexp_
static PyObject * THPVariable_ldexp_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "ldexp_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_ldexp_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.ldexp_(other);
  };
  return wrap(dispatch_ldexp_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// le
static PyObject * THPVariable_le(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "le(Tensor other)",
    "le(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::le.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_le = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.le(other);
      };
      return wrap(dispatch_le(self, _r.tensor(0)));
    }
    case 1: {
      // aten::le.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_le = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.le(other);
      };
      return wrap(dispatch_le(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// le_
static PyObject * THPVariable_le_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "le_(Tensor other)",
    "le_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_le_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.le_(other);
      };
      return wrap(dispatch_le_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_le_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.le_(other);
      };
      return wrap(dispatch_le_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// lerp
static PyObject * THPVariable_lerp(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "lerp(Tensor end, Tensor weight)",
    "lerp(Tensor end, Scalar weight)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor

      auto dispatch_lerp = [](const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.lerp(end, weight);
      };
      return wrap(dispatch_lerp(self, _r.tensor(0), _r.tensor(1)));
    }
    case 1: {
      // aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor

      auto dispatch_lerp = [](const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.lerp(end, weight);
      };
      return wrap(dispatch_lerp(self, _r.tensor(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
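
// NOTE (explanatory comment, not produced by the generator): lerp is linear
// interpolation, out = self + weight * (end - self), where `weight` may be
// a per-element Tensor or a single Scalar. For example:
//   >>> torch.tensor([0., 10.]).lerp(torch.tensor([10., 20.]), 0.5)
//   tensor([ 5., 15.])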

// lerp_
static PyObject * THPVariable_lerp_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "lerp_(Tensor end, Tensor weight)",
    "lerp_(Tensor end, Scalar weight)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)

      auto dispatch_lerp_ = [](const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.lerp_(end, weight);
      };
      return wrap(dispatch_lerp_(self, _r.tensor(0), _r.tensor(1)));
    }
    case 1: {
      // aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)

      auto dispatch_lerp_ = [](const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.lerp_(end, weight);
      };
      return wrap(dispatch_lerp_(self, _r.tensor(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// less
static PyObject * THPVariable_less(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "less(Tensor other)",
    "less(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::less.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_less = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.less(other);
      };
      return wrap(dispatch_less(self, _r.tensor(0)));
    }
    case 1: {
      // aten::less.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_less = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.less(other);
      };
      return wrap(dispatch_less(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// less_
static PyObject * THPVariable_less_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "less_(Tensor other)",
    "less_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_less_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.less_(other);
      };
      return wrap(dispatch_less_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_less_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.less_(other);
      };
      return wrap(dispatch_less_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// less_equal
static PyObject * THPVariable_less_equal(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "less_equal(Tensor other)",
    "less_equal(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_less_equal = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.less_equal(other);
      };
      return wrap(dispatch_less_equal(self, _r.tensor(0)));
    }
    case 1: {
      // aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_less_equal = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.less_equal(other);
      };
      return wrap(dispatch_less_equal(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// less_equal_
static PyObject * THPVariable_less_equal_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "less_equal_(Tensor other)",
    "less_equal_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_less_equal_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.less_equal_(other);
      };
      return wrap(dispatch_less_equal_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_less_equal_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.less_equal_(other);
      };
      return wrap(dispatch_less_equal_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
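
// NOTE (explanatory comment, not produced by the generator): less and
// less_equal are aliases of lt and le. All of these comparisons broadcast
// and return a bool tensor; the trailing-underscore variants instead write
// the result back into self in place.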

// lgamma
static PyObject * THPVariable_lgamma(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "lgamma");
  }
  // aten::lgamma(Tensor self) -> Tensor

  auto dispatch_lgamma = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.lgamma();
  };
  return wrap(dispatch_lgamma(self));
  END_HANDLE_TH_ERRORS
}

// lgamma_
static PyObject * THPVariable_lgamma_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "lgamma_");
  }
  // aten::lgamma_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_lgamma_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.lgamma_();
  };
  return wrap(dispatch_lgamma_(self));
  END_HANDLE_TH_ERRORS
}

// log
static PyObject * THPVariable_log(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "log");
  }
  // aten::log(Tensor self) -> Tensor

  auto dispatch_log = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log();
  };
  return wrap(dispatch_log(self));
  END_HANDLE_TH_ERRORS
}

// log10
static PyObject * THPVariable_log10(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "log10");
  }
  // aten::log10(Tensor self) -> Tensor

  auto dispatch_log10 = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log10();
  };
  return wrap(dispatch_log10(self));
  END_HANDLE_TH_ERRORS
}

// log10_
static PyObject * THPVariable_log10_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "log10_");
  }
  // aten::log10_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_log10_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log10_();
  };
  return wrap(dispatch_log10_(self));
  END_HANDLE_TH_ERRORS
}

// log1p
static PyObject * THPVariable_log1p(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "log1p");
  }
  // aten::log1p(Tensor self) -> Tensor

  auto dispatch_log1p = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log1p();
  };
  return wrap(dispatch_log1p(self));
  END_HANDLE_TH_ERRORS
}

// log1p_
static PyObject * THPVariable_log1p_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "log1p_");
  }
  // aten::log1p_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_log1p_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log1p_();
  };
  return wrap(dispatch_log1p_(self));
  END_HANDLE_TH_ERRORS
}

// log2
static PyObject * THPVariable_log2(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "log2");
  }
  // aten::log2(Tensor self) -> Tensor

  auto dispatch_log2 = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log2();
  };
  return wrap(dispatch_log2(self));
  END_HANDLE_TH_ERRORS
}

// log2_
static PyObject * THPVariable_log2_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "log2_");
  }
  // aten::log2_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_log2_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log2_();
  };
  return wrap(dispatch_log2_(self));
  END_HANDLE_TH_ERRORS
}

// log_
static PyObject * THPVariable_log_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "log_");
  }
  // aten::log_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_log_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log_();
  };
  return wrap(dispatch_log_(self));
  END_HANDLE_TH_ERRORS
}

// log_normal_
static PyObject * THPVariable_log_normal_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "log_normal_(double mean=1, double std=2, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)

  auto dispatch_log_normal_ = [](const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.log_normal_(mean, std, generator);
  };
  return wrap(dispatch_log_normal_(self, _r.toDouble(0), _r.toDouble(1), _r.generator(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
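
// NOTE (explanatory comment, not produced by the generator): log_normal_
// fills self in place with log-normally distributed samples; `mean` and
// `std` describe the underlying normal distribution, not the mean and
// standard deviation of the returned samples.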

// log_softmax
static PyObject * THPVariable_log_softmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "log_softmax(int64_t dim, ScalarType? dtype=None)",
    "log_softmax(Dimname dim, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor

      auto dispatch_log_softmax = [](const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.log_softmax(dim, dtype);
      };
      return wrap(dispatch_log_softmax(self, _r.toInt64(0), _r.scalartypeOptional(1)));
    }
    case 1: {
      // aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_log_softmax = [](const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.log_softmax(dim, dtype);
      };
      return wrap(dispatch_log_softmax(self, _r.dimname(0), _r.scalartypeOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
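
// NOTE (explanatory comment, not produced by the generator): log_softmax
// fuses log(softmax(x)) along `dim` into a single numerically stable
// kernel, avoiding the underflow that computing softmax first and taking
// log afterwards can produce.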

// logaddexp
static PyObject * THPVariable_logaddexp(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logaddexp(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logaddexp(Tensor self, Tensor other) -> Tensor

  auto dispatch_logaddexp = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logaddexp(other);
  };
  return wrap(dispatch_logaddexp(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logaddexp2
static PyObject * THPVariable_logaddexp2(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logaddexp2(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logaddexp2(Tensor self, Tensor other) -> Tensor

  auto dispatch_logaddexp2 = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logaddexp2(other);
  };
  return wrap(dispatch_logaddexp2(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
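
// NOTE (explanatory comment, not produced by the generator): logaddexp
// evaluates log(exp(self) + exp(other)) without overflow or underflow
// (logaddexp2 is the base-2 analogue). Where naive evaluation underflows:
//   >>> torch.tensor([-1000.]).logaddexp(torch.tensor([-1000.]))
//   tensor([-999.3069])   # i.e. -1000 + ln(2)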

// logcumsumexp
static PyObject * THPVariable_logcumsumexp(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logcumsumexp(int64_t dim)",
    "logcumsumexp(Dimname dim)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::logcumsumexp(Tensor self, int dim) -> Tensor

      auto dispatch_logcumsumexp = [](const at::Tensor & self, int64_t dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.logcumsumexp(dim);
      };
      return wrap(dispatch_logcumsumexp(self, _r.toInt64(0)));
    }
    case 1: {
      // aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor

      auto dispatch_logcumsumexp = [](const at::Tensor & self, at::Dimname dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.logcumsumexp(dim);
      };
      return wrap(dispatch_logcumsumexp(self, _r.dimname(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logdet
static PyObject * THPVariable_logdet(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "logdet");
  }
  // aten::logdet(Tensor self) -> Tensor

  auto dispatch_logdet = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logdet();
  };
  return wrap(dispatch_logdet(self));
  END_HANDLE_TH_ERRORS
}

// logical_and
static PyObject * THPVariable_logical_and(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logical_and(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logical_and(Tensor self, Tensor other) -> Tensor

  auto dispatch_logical_and = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logical_and(other);
  };
  return wrap(dispatch_logical_and(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logical_and_
static PyObject * THPVariable_logical_and_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logical_and_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_logical_and_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logical_and_(other);
  };
  return wrap(dispatch_logical_and_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logical_not
static PyObject * THPVariable_logical_not(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "logical_not");
  }
  // aten::logical_not(Tensor self) -> Tensor

  auto dispatch_logical_not = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logical_not();
  };
  return wrap(dispatch_logical_not(self));
  END_HANDLE_TH_ERRORS
}

// logical_not_
static PyObject * THPVariable_logical_not_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "logical_not_");
  }
  // aten::logical_not_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_logical_not_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logical_not_();
  };
  return wrap(dispatch_logical_not_(self));
  END_HANDLE_TH_ERRORS
}

// logical_or
static PyObject * THPVariable_logical_or(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logical_or(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logical_or(Tensor self, Tensor other) -> Tensor

  auto dispatch_logical_or = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logical_or(other);
  };
  return wrap(dispatch_logical_or(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logical_or_
static PyObject * THPVariable_logical_or_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logical_or_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_logical_or_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logical_or_(other);
  };
  return wrap(dispatch_logical_or_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logical_xor
static PyObject * THPVariable_logical_xor(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logical_xor(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logical_xor(Tensor self, Tensor other) -> Tensor

  auto dispatch_logical_xor = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logical_xor(other);
  };
  return wrap(dispatch_logical_xor(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logical_xor_
static PyObject * THPVariable_logical_xor_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logical_xor_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_logical_xor_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logical_xor_(other);
  };
  return wrap(dispatch_logical_xor_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logit
static PyObject * THPVariable_logit(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logit(double? eps=None)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logit(Tensor self, float? eps=None) -> Tensor

  auto dispatch_logit = [](const at::Tensor & self, c10::optional<double> eps) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logit(eps);
  };
  return wrap(dispatch_logit(self, _r.toDoubleOptional(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
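
// NOTE (explanatory comment, not produced by the generator): logit computes
// log(p / (1 - p)); when `eps` is given, the input is first clamped to
// [eps, 1 - eps] so probabilities at or beyond 0 and 1 do not map to
// +/-inf. For example, logit(0.5) == 0:
//   >>> torch.tensor([0.5]).logit()
//   tensor([0.])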

// logit_
static PyObject * THPVariable_logit_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logit_(double? eps=None)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)

  auto dispatch_logit_ = [](const at::Tensor & self, c10::optional<double> eps) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.logit_(eps);
  };
  return wrap(dispatch_logit_(self, _r.toDoubleOptional(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// logsumexp
static PyObject * THPVariable_logsumexp(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "logsumexp(IntArrayRef[1] dim, bool keepdim=False)",
    "logsumexp(DimnameList[1] dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor

      auto dispatch_logsumexp = [](const at::Tensor & self, at::IntArrayRef dim, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.logsumexp(dim, keepdim);
      };
      return wrap(dispatch_logsumexp(self, _r.intlist(0), _r.toBool(1)));
    }
    case 1: {
      // aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor

      auto dispatch_logsumexp = [](const at::Tensor & self, at::DimnameList dim, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.logsumexp(dim, keepdim);
      };
      return wrap(dispatch_logsumexp(self, _r.dimnamelist(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
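
// NOTE (explanatory comment, not produced by the generator): logsumexp
// reduces via log(sum(exp(x))) along `dim` using the stable identity
//   log Σ exp(x_i) = m + log Σ exp(x_i - m),  where m = max_i x_i,
// so large-magnitude inputs do not overflow the intermediate exp.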

// lt
static PyObject * THPVariable_lt(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "lt(Tensor other)",
    "lt(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::lt.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_lt = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.lt(other);
      };
      return wrap(dispatch_lt(self, _r.tensor(0)));
    }
    case 1: {
      // aten::lt.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_lt = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.lt(other);
      };
      return wrap(dispatch_lt(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// lt_
static PyObject * THPVariable_lt_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "lt_(Tensor other)",
    "lt_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_lt_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.lt_(other);
      };
      return wrap(dispatch_lt_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_lt_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.lt_(other);
      };
      return wrap(dispatch_lt_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// lu_solve
static PyObject * THPVariable_lu_solve(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "lu_solve(Tensor LU_data, Tensor LU_pivots)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor

  auto dispatch_lu_solve = [](const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.lu_solve(LU_data, LU_pivots);
  };
  return wrap(dispatch_lu_solve(self, _r.tensor(0), _r.tensor(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// masked_fill
static PyObject * THPVariable_masked_fill(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "masked_fill(Tensor mask, Tensor value)",
    "masked_fill(Tensor mask, Scalar value)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor

      auto dispatch_masked_fill = [](const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.masked_fill(mask, value);
      };
      return wrap(dispatch_masked_fill(self, _r.tensor(0), _r.tensor(1)));
    }
    case 1: {
      // aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor

      auto dispatch_masked_fill = [](const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.masked_fill(mask, value);
      };
      return wrap(dispatch_masked_fill(self, _r.tensor(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// masked_fill_
static PyObject * THPVariable_masked_fill_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "masked_fill_(Tensor mask, Tensor value)",
    "masked_fill_(Tensor mask, Scalar value)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)

      auto dispatch_masked_fill_ = [](const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.masked_fill_(mask, value);
      };
      return wrap(dispatch_masked_fill_(self, _r.tensor(0), _r.tensor(1)));
    }
    case 1: {
      // aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)

      auto dispatch_masked_fill_ = [](const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.masked_fill_(mask, value);
      };
      return wrap(dispatch_masked_fill_(self, _r.tensor(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
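
// NOTE (explanatory comment, not produced by the generator): masked_fill
// replaces the elements of self where the broadcastable bool `mask` is
// True with `value`; the Tensor overload expects a 0-dimensional value.
//   >>> torch.zeros(3).masked_fill(torch.tensor([True, False, True]), 1.)
//   tensor([1., 0., 1.])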

// masked_scatter
static PyObject * THPVariable_masked_scatter(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "masked_scatter(Tensor mask, Tensor source)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor

  auto dispatch_masked_scatter = [](const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.masked_scatter(mask, source);
  };
  return wrap(dispatch_masked_scatter(self, _r.tensor(0), _r.tensor(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// masked_scatter_
static PyObject * THPVariable_masked_scatter_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "masked_scatter_(Tensor mask, Tensor source)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)

  auto dispatch_masked_scatter_ = [](const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.masked_scatter_(mask, source);
  };
  return wrap(dispatch_masked_scatter_(self, _r.tensor(0), _r.tensor(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// masked_select
static PyObject * THPVariable_masked_select(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "masked_select(Tensor mask)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::masked_select(Tensor self, Tensor mask) -> Tensor

  auto dispatch_masked_select = [](const at::Tensor & self, const at::Tensor & mask) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.masked_select(mask);
  };
  return wrap(dispatch_masked_select(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// matmul
static PyObject * THPVariable_matmul(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "matmul(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::matmul(Tensor self, Tensor other) -> Tensor

  auto dispatch_matmul = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.matmul(other);
  };
  return wrap(dispatch_matmul(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// matrix_exp
static PyObject * THPVariable_matrix_exp(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "matrix_exp");
  }
  // aten::matrix_exp(Tensor self) -> Tensor

  auto dispatch_matrix_exp = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.matrix_exp();
  };
  return wrap(dispatch_matrix_exp(self));
  END_HANDLE_TH_ERRORS
}

// matrix_power
static PyObject * THPVariable_matrix_power(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "matrix_power(int64_t n)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::matrix_power(Tensor self, int n) -> Tensor

  auto dispatch_matrix_power = [](const at::Tensor & self, int64_t n) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.matrix_power(n);
  };
  return wrap(dispatch_matrix_power(self, _r.toInt64(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
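
// NOTE (explanatory comment, not produced by the generator): matrix_power
// raises a square matrix (or batch of matrices) to an integer power; n=0
// yields the identity, and a negative n is computed from the inverse, so
// it requires an invertible input.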

// max
static PyObject * THPVariable_max(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("max");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "max()",
    "max(Tensor other)",
    "max(int64_t dim, bool keepdim=False)",
    "max(Dimname dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::max(Tensor self) -> Tensor

      auto dispatch_max = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.max();
      };
      return wrap(dispatch_max(self));
    }
    case 1: {
      // aten::max.other(Tensor self, Tensor other) -> Tensor

      auto dispatch_max = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.max(other);
      };
      return wrap(dispatch_max(self, _r.tensor(0)));
    }
    case 2: {
      // aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_max = [](const at::Tensor & self, int64_t dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.max(dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_max(self, _r.toInt64(0), _r.toBool(1)));
    }
    case 3: {
      // aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_max = [](const at::Tensor & self, at::Dimname dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.max(dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_max(self, _r.dimname(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
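
// NOTE (explanatory comment, not produced by the generator): the overloads
// above give max three behaviours: max() reduces to the single global
// maximum, max(other) is an element-wise maximum of two tensors, and
// max(dim) returns a (values, indices) named tuple along that dimension.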
10823
10824// maximum
10825static PyObject * THPVariable_maximum(PyObject* self_, PyObject* args, PyObject* kwargs)
10826{
10827 HANDLE_TH_ERRORS
10828 const Tensor& self = THPVariable_Unpack(self_);
10829 static PythonArgParser parser({
10830 "maximum(Tensor other)",
10831 }, /*traceable=*/true);
10832
10833 ParsedArgs<1> parsed_args;
10834 auto _r = parser.parse(self_, args, kwargs, parsed_args);
10835 if(_r.has_torch_function()) {
10836 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
10837 }
10838 // aten::maximum(Tensor self, Tensor other) -> Tensor
10839
10840 auto dispatch_maximum = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
10841 pybind11::gil_scoped_release no_gil;
10842 return self.maximum(other);
10843 };
10844 return wrap(dispatch_maximum(self, _r.tensor(0)));
10845 Py_RETURN_NONE;
10846 END_HANDLE_TH_ERRORS
10847}
10848
10849\
10850// mean
10851static PyObject * THPVariable_mean(PyObject* self_, PyObject* args, PyObject* kwargs)
10852{
10853 HANDLE_TH_ERRORS
10854 const Tensor& self = THPVariable_Unpack(self_);
10855 static PythonArgParser parser({
10856 "mean(*, ScalarType? dtype=None)",
10857 "mean(IntArrayRef[1]? dim, bool keepdim=False, *, ScalarType? dtype=None)",
10858 "mean(DimnameList[1] dim, bool keepdim=False, *, ScalarType? dtype=None)",
10859 }, /*traceable=*/true);
10860
10861 ParsedArgs<3> parsed_args;
10862 auto _r = parser.parse(self_, args, kwargs, parsed_args);
10863 if(_r.has_torch_function()) {
10864 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
10865 }
10866 switch (_r.idx) {
10867 case 0: {
10868 // aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
10869
10870 auto dispatch_mean = [](const at::Tensor & self, c10::optional<at::ScalarType> dtype) -> at::Tensor {
10871 pybind11::gil_scoped_release no_gil;
10872 return self.mean(dtype);
10873 };
10874 return wrap(dispatch_mean(self, _r.scalartypeOptional(0)));
10875 }
10876 case 1: {
10877 // aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
10878
10879 auto dispatch_mean = [](const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
10880 pybind11::gil_scoped_release no_gil;
10881 return self.mean(dim, keepdim, dtype);
10882 };
10883 return wrap(dispatch_mean(self, _r.intlistOptional(0), _r.toBool(1), _r.scalartypeOptional(2)));
10884 }
10885 case 2: {
10886 // aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
10887
10888 auto dispatch_mean = [](const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
10889 pybind11::gil_scoped_release no_gil;
10890 return self.mean(dim, keepdim, dtype);
10891 };
10892 return wrap(dispatch_mean(self, _r.dimnamelist(0), _r.toBool(1), _r.scalartypeOptional(2)));
10893 }
10894 }
10895 Py_RETURN_NONE;
10896 END_HANDLE_TH_ERRORS
10897}
10898
10899\
10900// median
10901static PyObject * THPVariable_median(PyObject* self_, PyObject* args, PyObject* kwargs)
10902{
10903 HANDLE_TH_ERRORS
10904 static PyTypeObject* NamedTuple = get_namedtuple("median");
10905 const Tensor& self = THPVariable_Unpack(self_);
10906 static PythonArgParser parser({
10907 "median()",
10908 "median(int64_t dim, bool keepdim=False)",
10909 "median(Dimname dim, bool keepdim=False)",
10910 }, /*traceable=*/true);
10911
10912 ParsedArgs<2> parsed_args;
10913 auto _r = parser.parse(self_, args, kwargs, parsed_args);
10914 if(_r.has_torch_function()) {
10915 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
10916 }
10917 switch (_r.idx) {
10918 case 0: {
10919 // aten::median(Tensor self) -> Tensor
10920
10921 auto dispatch_median = [](const at::Tensor & self) -> at::Tensor {
10922 pybind11::gil_scoped_release no_gil;
10923 return self.median();
10924 };
10925 return wrap(dispatch_median(self));
10926 }
10927 case 1: {
10928 // aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
10929
10930 auto dispatch_median = [](const at::Tensor & self, int64_t dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
10931 pybind11::gil_scoped_release no_gil;
10932 return self.median(dim, keepdim);
10933 };
10934 return wrap(NamedTuple, dispatch_median(self, _r.toInt64(0), _r.toBool(1)));
10935 }
10936 case 2: {
10937 // aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
10938
10939 auto dispatch_median = [](const at::Tensor & self, at::Dimname dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
10940 pybind11::gil_scoped_release no_gil;
10941 return self.median(dim, keepdim);
10942 };
10943 return wrap(NamedTuple, dispatch_median(self, _r.dimname(0), _r.toBool(1)));
10944 }
10945 }
10946 Py_RETURN_NONE;
10947 END_HANDLE_TH_ERRORS
10948}

\
// min
static PyObject * THPVariable_min(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("min");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "min()",
    "min(Tensor other)",
    "min(int64_t dim, bool keepdim=False)",
    "min(Dimname dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::min(Tensor self) -> Tensor

      auto dispatch_min = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.min();
      };
      return wrap(dispatch_min(self));
    }
    case 1: {
      // aten::min.other(Tensor self, Tensor other) -> Tensor

      auto dispatch_min = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.min(other);
      };
      return wrap(dispatch_min(self, _r.tensor(0)));
    }
    case 2: {
      // aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_min = [](const at::Tensor & self, int64_t dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.min(dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_min(self, _r.toInt64(0), _r.toBool(1)));
    }
    case 3: {
      // aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_min = [](const at::Tensor & self, at::Dimname dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.min(dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_min(self, _r.dimname(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
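
// Usage sketch (illustrative; assumes in-scope `at::Tensor t, u` of matching
// shape): unlike median, min also has an elementwise overload.
//   at::Tensor a = t.min();                       // case 0: full reduction
//   at::Tensor b = t.min(u);                      // case 1: elementwise minimum
//   auto [values, indices] = t.min(/*dim=*/0, /*keepdim=*/true);  // case 2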

// minimum
static PyObject * THPVariable_minimum(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "minimum(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::minimum(Tensor self, Tensor other) -> Tensor

  auto dispatch_minimum = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.minimum(other);
  };
  return wrap(dispatch_minimum(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// mm
static PyObject * THPVariable_mm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "mm(Tensor mat2)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::mm(Tensor self, Tensor mat2) -> Tensor

  auto dispatch_mm = [](const at::Tensor & self, const at::Tensor & mat2) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.mm(mat2);
  };
  return wrap(dispatch_mm(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// mode
static PyObject * THPVariable_mode(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("mode");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "mode(int64_t dim=-1, bool keepdim=False)",
    "mode(Dimname dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_mode = [](const at::Tensor & self, int64_t dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.mode(dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_mode(self, _r.toInt64(0), _r.toBool(1)));
    }
    case 1: {
      // aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_mode = [](const at::Tensor & self, at::Dimname dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.mode(dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_mode(self, _r.dimname(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
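
// Usage sketch (illustrative; assumes an in-scope `at::Tensor t`): since the
// first signature defaults dim to -1, a bare t.mode() call from Python still
// lands in case 0 and reduces over the last dimension.
//   auto [values, indices] = t.mode();    // most frequent value along dim -1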

\
// moveaxis
static PyObject * THPVariable_moveaxis(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "moveaxis(int64_t source, int64_t destination)",
    "moveaxis(IntArrayRef source, IntArrayRef destination)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)

      auto dispatch_moveaxis = [](const at::Tensor & self, int64_t source, int64_t destination) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.moveaxis(source, destination);
      };
      return wrap(dispatch_moveaxis(self, _r.toInt64(0), _r.toInt64(1)));
    }
    case 1: {
      // aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)

      auto dispatch_moveaxis = [](const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.moveaxis(source, destination);
      };
      return wrap(dispatch_moveaxis(self, _r.intlist(0), _r.intlist(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// movedim
static PyObject * THPVariable_movedim(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "movedim(int64_t source, int64_t destination)",
    "movedim(IntArrayRef source, IntArrayRef destination)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)

      auto dispatch_movedim = [](const at::Tensor & self, int64_t source, int64_t destination) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.movedim(source, destination);
      };
      return wrap(dispatch_movedim(self, _r.toInt64(0), _r.toInt64(1)));
    }
    case 1: {
      // aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)

      auto dispatch_movedim = [](const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.movedim(source, destination);
      };
      return wrap(dispatch_movedim(self, _r.intlist(0), _r.intlist(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
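
// Usage sketch (illustrative; assumes an in-scope rank-3 `at::Tensor t`):
// moveaxis is the NumPy-spelled alias of movedim — both expose the same two
// overloads and return views of the same storage.
//   at::Tensor a = t.movedim(0, 2);               // single source/destination
//   at::Tensor b = t.moveaxis({0, 1}, {2, 0});    // int-list overload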

// msort
static PyObject * THPVariable_msort(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "msort");
  }
  // aten::msort(Tensor self) -> Tensor

  auto dispatch_msort = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.msort();
  };
  return wrap(dispatch_msort(self));
  END_HANDLE_TH_ERRORS
}

// mul
static PyObject * THPVariable_mul(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "mul(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::mul.Tensor(Tensor self, Tensor other) -> Tensor

  auto dispatch_mul = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.mul(other);
  };
  return wrap(dispatch_mul(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// mul_
static PyObject * THPVariable_mul_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "mul_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_mul_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.mul_(other);
  };
  return wrap(dispatch_mul_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// multinomial
static PyObject * THPVariable_multinomial(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "multinomial(int64_t num_samples, bool replacement=False, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor

  auto dispatch_multinomial = [](const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.multinomial(num_samples, replacement, generator);
  };
  return wrap(dispatch_multinomial(self, _r.toInt64(0), _r.toBool(1), _r.generator(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
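
// Usage sketch (illustrative; assumes an in-scope `at::Tensor probs` holding
// non-negative row weights; the generator-construction call is an assumption,
// not taken from this file):
//   at::Tensor idx = probs.multinomial(/*num_samples=*/4, /*replacement=*/true);
//   at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
//   at::Tensor idx2 = probs.multinomial(4, false, gen);   // seeded draw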

\
// multiply
static PyObject * THPVariable_multiply(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "multiply(Tensor other)",
    "multiply(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_multiply = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.multiply(other);
      };
      return wrap(dispatch_multiply(self, _r.tensor(0)));
    }
    case 1: {
      // aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_multiply = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.multiply(other);
      };
      return wrap(dispatch_multiply(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// multiply_
static PyObject * THPVariable_multiply_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "multiply_(Tensor other)",
    "multiply_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_multiply_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.multiply_(other);
      };
      return wrap(dispatch_multiply_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_multiply_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.multiply_(other);
      };
      return wrap(dispatch_multiply_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// mv
static PyObject * THPVariable_mv(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "mv(Tensor vec)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::mv(Tensor self, Tensor vec) -> Tensor

  auto dispatch_mv = [](const at::Tensor & self, const at::Tensor & vec) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.mv(vec);
  };
  return wrap(dispatch_mv(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// mvlgamma
static PyObject * THPVariable_mvlgamma(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "mvlgamma(int64_t p)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::mvlgamma(Tensor self, int p) -> Tensor

  auto dispatch_mvlgamma = [](const at::Tensor & self, int64_t p) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.mvlgamma(p);
  };
  return wrap(dispatch_mvlgamma(self, _r.toInt64(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// mvlgamma_
static PyObject * THPVariable_mvlgamma_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "mvlgamma_(int64_t p)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)

  auto dispatch_mvlgamma_ = [](const at::Tensor & self, int64_t p) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.mvlgamma_(p);
  };
  return wrap(dispatch_mvlgamma_(self, _r.toInt64(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// nan_to_num
static PyObject * THPVariable_nan_to_num(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "nan_to_num(double? nan=None, double? posinf=None, double? neginf=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor

  auto dispatch_nan_to_num = [](const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.nan_to_num(nan, posinf, neginf);
  };
  return wrap(dispatch_nan_to_num(self, _r.toDoubleOptional(0), _r.toDoubleOptional(1), _r.toDoubleOptional(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// nan_to_num_
static PyObject * THPVariable_nan_to_num_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "nan_to_num_(double? nan=None, double? posinf=None, double? neginf=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)

  auto dispatch_nan_to_num_ = [](const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.nan_to_num_(nan, posinf, neginf);
  };
  return wrap(dispatch_nan_to_num_(self, _r.toDoubleOptional(0), _r.toDoubleOptional(1), _r.toDoubleOptional(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
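
// Usage sketch (illustrative; assumes an in-scope `at::Tensor t` that may
// contain NaN/Inf): each optional left unset falls back to the documented
// default (0 for nan, the dtype's greatest/lowest finite value for
// posinf/neginf).
//   at::Tensor clean = t.nan_to_num();                           // out-of-place
//   t.nan_to_num_(/*nan=*/0.0, /*posinf=*/1e6, /*neginf=*/-1e6); // in-place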

// nanmean
static PyObject * THPVariable_nanmean(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "nanmean(IntArrayRef[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor

  auto dispatch_nanmean = [](const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.nanmean(dim, keepdim, dtype);
  };
  return wrap(dispatch_nanmean(self, _r.intlistOptional(0), _r.toBool(1), _r.scalartypeOptional(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// nanmedian
static PyObject * THPVariable_nanmedian(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("nanmedian");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "nanmedian()",
    "nanmedian(int64_t dim, bool keepdim=False)",
    "nanmedian(Dimname dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::nanmedian(Tensor self) -> Tensor

      auto dispatch_nanmedian = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.nanmedian();
      };
      return wrap(dispatch_nanmedian(self));
    }
    case 1: {
      // aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_nanmedian = [](const at::Tensor & self, int64_t dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.nanmedian(dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_nanmedian(self, _r.toInt64(0), _r.toBool(1)));
    }
    case 2: {
      // aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)

      auto dispatch_nanmedian = [](const at::Tensor & self, at::Dimname dim, bool keepdim) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.nanmedian(dim, keepdim);
      };
      return wrap(NamedTuple, dispatch_nanmedian(self, _r.dimname(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// nanquantile
static PyObject * THPVariable_nanquantile(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "nanquantile(Tensor q, int64_t? dim=None, bool keepdim=False, *, c10::string_view interpolation=\"linear\")",
    "nanquantile(double q, int64_t? dim=None, bool keepdim=False, *, c10::string_view interpolation=\"linear\")",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor

      auto dispatch_nanquantile = [](const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.nanquantile(q, dim, keepdim, interpolation);
      };
      return wrap(dispatch_nanquantile(self, _r.tensor(0), _r.toInt64Optional(1), _r.toBool(2), _r.stringView(3)));
    }
    case 1: {
      // aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor

      auto dispatch_nanquantile = [](const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.nanquantile(q, dim, keepdim, interpolation);
      };
      return wrap(dispatch_nanquantile(self, _r.toDouble(0), _r.toInt64Optional(1), _r.toBool(2), _r.stringView(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
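
// Usage sketch (illustrative; assumes an in-scope floating `at::Tensor t`):
// NaNs are ignored in the quantile computation, and the trailing string_view
// picks the interpolation scheme ("linear" is the schema default above).
//   at::Tensor q = t.nanquantile(/*q=*/0.5);      // scalar-q overload, all dims
//   at::Tensor q2 = t.nanquantile(0.9, /*dim=*/0, /*keepdim=*/false, "nearest");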

// nansum
static PyObject * THPVariable_nansum(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "nansum(IntArrayRef[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor

  auto dispatch_nansum = [](const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.nansum(dim, keepdim, dtype);
  };
  return wrap(dispatch_nansum(self, _r.intlistOptional(0), _r.toBool(1), _r.scalartypeOptional(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// narrow
static PyObject * THPVariable_narrow(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "narrow(int64_t dim, Tensor start, SymInt length)",
    "narrow(int64_t dim, SymInt start, SymInt length)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)

      auto dispatch_narrow = [](const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.narrow_symint(dim, start, length);
      };
      return wrap(dispatch_narrow(self, _r.toInt64(0), _r.tensor(1), _r.toSymInt(2)));
    }
    case 1: {
      // aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)

      auto dispatch_narrow = [](const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.narrow_symint(dim, start, length);
      };
      return wrap(dispatch_narrow(self, _r.toInt64(0), _r.toSymInt(1), _r.toSymInt(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
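
// Usage sketch (illustrative; assumes an in-scope `at::Tensor t` with
// t.size(0) >= 5): narrow returns a view, and the SymInt signatures above let
// traced/compiled graphs pass symbolic sizes; plain int64_t converts
// implicitly at this call site.
//   at::Tensor v = t.narrow(/*dim=*/0, /*start=*/1, /*length=*/4);
//   v.add_(1);   // writes through to the corresponding slice of t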

// narrow_copy
static PyObject * THPVariable_narrow_copy(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "narrow_copy(int64_t dim, SymInt start, SymInt length)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor

  auto dispatch_narrow_copy = [](const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.narrow_copy_symint(dim, start, length);
  };
  return wrap(dispatch_narrow_copy(self, _r.toInt64(0), _r.toSymInt(1), _r.toSymInt(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// ne
static PyObject * THPVariable_ne(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "ne(Tensor other)",
    "ne(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::ne.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_ne = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.ne(other);
      };
      return wrap(dispatch_ne(self, _r.tensor(0)));
    }
    case 1: {
      // aten::ne.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_ne = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.ne(other);
      };
      return wrap(dispatch_ne(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// ne_
static PyObject * THPVariable_ne_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "ne_(Tensor other)",
    "ne_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_ne_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.ne_(other);
      };
      return wrap(dispatch_ne_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_ne_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.ne_(other);
      };
      return wrap(dispatch_ne_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// neg
static PyObject * THPVariable_neg(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "neg");
  }
  // aten::neg(Tensor self) -> Tensor

  auto dispatch_neg = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.neg();
  };
  return wrap(dispatch_neg(self));
  END_HANDLE_TH_ERRORS
}

// neg_
static PyObject * THPVariable_neg_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "neg_");
  }
  // aten::neg_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_neg_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.neg_();
  };
  return wrap(dispatch_neg_(self));
  END_HANDLE_TH_ERRORS
}

// negative
static PyObject * THPVariable_negative(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "negative");
  }
  // aten::negative(Tensor self) -> Tensor

  auto dispatch_negative = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.negative();
  };
  return wrap(dispatch_negative(self));
  END_HANDLE_TH_ERRORS
}

// negative_
static PyObject * THPVariable_negative_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "negative_");
  }
  // aten::negative_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_negative_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.negative_();
  };
  return wrap(dispatch_negative_(self));
  END_HANDLE_TH_ERRORS
}

// new_empty
static PyObject * THPVariable_new_empty(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "new_empty(SymIntArrayRef size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? requires_grad=False)",
  }, /*traceable=*/true);

  ParsedArgs<6> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  const auto options = TensorOptions()
      .dtype(_r.scalartypeWithDefault(1, self.scalar_type()))
      .device(_r.deviceWithDefault(3, self.device()))
      .layout(_r.layoutWithDefault(2, self.layout()))
      .requires_grad(_r.toBool(5))
      .pinned_memory(_r.toBool(4));
  torch::utils::maybe_initialize_cuda(options);

  auto dispatch_new_empty = [](const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.new_empty_symint(size, options);
  };
  return wrap(dispatch_new_empty(self, _r.symintlist(0), options).set_requires_grad(_r.toBool(5)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
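
// Note on the options plumbing above (illustrative): parser indices 1-3 map
// to dtype, layout and device, each defaulting to the matching property of
// `self`, while pin_memory (4) and requires_grad (5) default to false;
// requires_grad is additionally applied to the wrapped result via
// set_requires_grad. The new_empty_strided/new_full/new_ones/new_zeros
// bindings that follow use the same pattern. Sketch, assuming an in-scope
// `at::Tensor t`:
//   at::Tensor buf = t.new_empty({2, 3});   // inherits t's dtype/device/layout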

// new_empty_strided
static PyObject * THPVariable_new_empty_strided(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "new_empty_strided(SymIntArrayRef size, SymIntArrayRef stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? requires_grad=False)",
  }, /*traceable=*/true);

  ParsedArgs<7> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  const auto options = TensorOptions()
      .dtype(_r.scalartypeWithDefault(2, self.scalar_type()))
      .device(_r.deviceWithDefault(4, self.device()))
      .layout(_r.layoutWithDefault(3, self.layout()))
      .requires_grad(_r.toBool(6))
      .pinned_memory(_r.toBool(5));
  torch::utils::maybe_initialize_cuda(options);

  auto dispatch_new_empty_strided = [](const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.new_empty_strided_symint(size, stride, options);
  };
  return wrap(dispatch_new_empty_strided(self, _r.symintlist(0), _r.symintlist(1), options).set_requires_grad(_r.toBool(6)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// new_full
static PyObject * THPVariable_new_full(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "new_full(SymIntArrayRef size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? requires_grad=False)",
  }, /*traceable=*/true);

  ParsedArgs<7> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  const auto options = TensorOptions()
      .dtype(_r.scalartypeWithDefault(2, self.scalar_type()))
      .device(_r.deviceWithDefault(4, self.device()))
      .layout(_r.layoutWithDefault(3, self.layout()))
      .requires_grad(_r.toBool(6))
      .pinned_memory(_r.toBool(5));
  torch::utils::maybe_initialize_cuda(options);

  auto dispatch_new_full = [](const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.new_full_symint(size, fill_value, options);
  };
  return wrap(dispatch_new_full(self, _r.symintlist(0), _r.scalar(1), options).set_requires_grad(_r.toBool(6)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// new_ones
static PyObject * THPVariable_new_ones(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "new_ones(SymIntArrayRef size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? requires_grad=False)",
  }, /*traceable=*/true);

  ParsedArgs<6> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  const auto options = TensorOptions()
      .dtype(_r.scalartypeWithDefault(1, self.scalar_type()))
      .device(_r.deviceWithDefault(3, self.device()))
      .layout(_r.layoutWithDefault(2, self.layout()))
      .requires_grad(_r.toBool(5))
      .pinned_memory(_r.toBool(4));
  torch::utils::maybe_initialize_cuda(options);

  auto dispatch_new_ones = [](const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.new_ones_symint(size, options);
  };
  return wrap(dispatch_new_ones(self, _r.symintlist(0), options).set_requires_grad(_r.toBool(5)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// new_zeros
static PyObject * THPVariable_new_zeros(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "new_zeros(SymIntArrayRef size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? requires_grad=False)",
  }, /*traceable=*/true);

  ParsedArgs<6> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  const auto options = TensorOptions()
      .dtype(_r.scalartypeWithDefault(1, self.scalar_type()))
      .device(_r.deviceWithDefault(3, self.device()))
      .layout(_r.layoutWithDefault(2, self.layout()))
      .requires_grad(_r.toBool(5))
      .pinned_memory(_r.toBool(4));
  torch::utils::maybe_initialize_cuda(options);

  auto dispatch_new_zeros = [](const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.new_zeros_symint(size, options);
  };
  return wrap(dispatch_new_zeros(self, _r.symintlist(0), options).set_requires_grad(_r.toBool(5)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// nextafter
static PyObject * THPVariable_nextafter(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "nextafter(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::nextafter(Tensor self, Tensor other) -> Tensor

  auto dispatch_nextafter = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.nextafter(other);
  };
  return wrap(dispatch_nextafter(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// nextafter_
static PyObject * THPVariable_nextafter_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "nextafter_(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)

  auto dispatch_nextafter_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.nextafter_(other);
  };
  return wrap(dispatch_nextafter_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// norm
static PyObject * THPVariable_norm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "norm(Scalar p=2)",
    "norm(Scalar? p, *, ScalarType dtype)",
    "norm(Scalar? p, IntArrayRef[1] dim, bool keepdim, *, ScalarType dtype)",
    "norm(Scalar? p, IntArrayRef[1] dim, bool keepdim=False)",
    "norm(Scalar? p, DimnameList[1] dim, bool keepdim, *, ScalarType dtype)",
    "norm(Scalar? p, DimnameList[1] dim, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor

      auto dispatch_norm = [](const at::Tensor & self, const at::Scalar & p) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.norm(p);
      };
      return wrap(dispatch_norm(self, _r.scalar(0)));
    }
    case 1: {
      // aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor

      auto dispatch_norm = [](const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.norm(p, dtype);
      };
      return wrap(dispatch_norm(self, _r.scalarOptional(0), _r.scalartype(1)));
    }
    case 2: {
      // aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor

      auto dispatch_norm = [](const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.norm(p, dim, keepdim, dtype);
      };
      return wrap(dispatch_norm(self, _r.scalarOptional(0), _r.intlist(1), _r.toBool(2), _r.scalartype(3)));
    }
    case 3: {
      // aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor

      auto dispatch_norm = [](const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.norm(p, dim, keepdim);
      };
      return wrap(dispatch_norm(self, _r.scalarOptional(0), _r.intlist(1), _r.toBool(2)));
    }
    case 4: {
      // aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor

      auto dispatch_norm = [](const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.norm(p, dim, keepdim, dtype);
      };
      return wrap(dispatch_norm(self, _r.scalarOptional(0), _r.dimnamelist(1), _r.toBool(2), _r.scalartype(3)));
    }
    case 5: {
      // aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor

      auto dispatch_norm = [](const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.norm(p, dim, keepdim);
      };
      return wrap(dispatch_norm(self, _r.scalarOptional(0), _r.dimnamelist(1), _r.toBool(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
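
// Usage sketch (illustrative; assumes an in-scope `at::Tensor t`): the parser
// tries the six signatures in order, so a bare Scalar p selects case 0, while
// supplying dim and/or dtype walks down to the later cases.
//   at::Tensor n1 = t.norm(1);                            // p = 1, all elements
//   at::Tensor n2 = t.norm(2, /*dim=*/{0}, /*keepdim=*/false);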

// normal_
static PyObject * THPVariable_normal_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "normal_(double mean=0, double std=1, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)

  auto dispatch_normal_ = [](const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.normal_(mean, std, generator);
  };
  return wrap(dispatch_normal_(self, _r.toDouble(0), _r.toDouble(1), _r.generator(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
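
// Usage sketch (illustrative; assumes an in-scope floating `at::Tensor t`):
// normal_ overwrites t in place with draws from N(mean, std^2) and returns t
// so calls can be chained.
//   t.normal_();                          // mean=0, std=1
//   t.normal_(/*mean=*/5.0, /*std=*/0.1);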

\
// not_equal
static PyObject * THPVariable_not_equal(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "not_equal(Tensor other)",
    "not_equal(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_not_equal = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.not_equal(other);
      };
      return wrap(dispatch_not_equal(self, _r.tensor(0)));
    }
    case 1: {
      // aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_not_equal = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.not_equal(other);
      };
      return wrap(dispatch_not_equal(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

\
// not_equal_
static PyObject * THPVariable_not_equal_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "not_equal_(Tensor other)",
    "not_equal_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_not_equal_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.not_equal_(other);
      };
      return wrap(dispatch_not_equal_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_not_equal_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.not_equal_(other);
      };
      return wrap(dispatch_not_equal_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// orgqr
static PyObject * THPVariable_orgqr(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "orgqr(Tensor input2)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::orgqr(Tensor self, Tensor input2) -> Tensor

  auto dispatch_orgqr = [](const at::Tensor & self, const at::Tensor & input2) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.orgqr(input2);
  };
  return wrap(dispatch_orgqr(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// ormqr
static PyObject * THPVariable_ormqr(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "ormqr(Tensor input2, Tensor input3, bool left=True, bool transpose=False)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor

  auto dispatch_ormqr = [](const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.ormqr(input2, input3, left, transpose);
  };
  return wrap(dispatch_ormqr(self, _r.tensor(0), _r.tensor(1), _r.toBool(2), _r.toBool(3)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// outer
static PyObject * THPVariable_outer(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "outer(Tensor vec2)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::outer(Tensor self, Tensor vec2) -> Tensor

  auto dispatch_outer = [](const at::Tensor & self, const at::Tensor & vec2) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.outer(vec2);
  };
  return wrap(dispatch_outer(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// permute
static PyObject * THPVariable_permute(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "permute(IntArrayRef dims)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)

  auto dispatch_permute = [](const at::Tensor & self, at::IntArrayRef dims) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.permute(dims);
  };
  return wrap(dispatch_permute(self, _r.intlist(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// pin_memory
static PyObject * THPVariable_pin_memory(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "pin_memory(Device? device=None)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)

  auto dispatch_pin_memory = [](const at::Tensor & self, c10::optional<at::Device> device) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.pin_memory(device);
  };
  return wrap(dispatch_pin_memory(self, _r.deviceOptional(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// pinverse
static PyObject * THPVariable_pinverse(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "pinverse(double rcond=1e-15)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor

  auto dispatch_pinverse = [](const at::Tensor & self, double rcond) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.pinverse(rcond);
  };
  return wrap(dispatch_pinverse(self, _r.toDouble(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// polygamma
static PyObject * THPVariable_polygamma(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "polygamma(int64_t n)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::polygamma(int n, Tensor self) -> Tensor

  auto dispatch_polygamma = [](int64_t n, const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.polygamma(n);
  };
  return wrap(dispatch_polygamma(_r.toInt64(0), self));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
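
// Note (illustrative): the schema is aten::polygamma(int n, Tensor self) with
// n first, which is why the dispatch lambda above takes (n, self) yet still
// calls the method form self.polygamma(n). Sketch, assuming an in-scope
// floating `at::Tensor t`:
//   at::Tensor trigamma = t.polygamma(1);   // n-th derivative of digamma, n=1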

// polygamma_
static PyObject * THPVariable_polygamma_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "polygamma_(int64_t n)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)

  auto dispatch_polygamma_ = [](const at::Tensor & self, int64_t n) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.polygamma_(n);
  };
  return wrap(dispatch_polygamma_(self, _r.toInt64(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// positive
static PyObject * THPVariable_positive(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "positive");
  }
  // aten::positive(Tensor(a) self) -> Tensor(a)

  auto dispatch_positive = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.positive();
  };
  return wrap(dispatch_positive(self));
  END_HANDLE_TH_ERRORS
}

\
// pow
static PyObject * THPVariable_pow(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "pow(Tensor exponent)",
    "pow(Scalar exponent)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor

      auto dispatch_pow = [](const at::Tensor & self, const at::Tensor & exponent) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.pow(exponent);
      };
      return wrap(dispatch_pow(self, _r.tensor(0)));
    }
    case 1: {
      // aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor

      auto dispatch_pow = [](const at::Tensor & self, const at::Scalar & exponent) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.pow(exponent);
      };
      return wrap(dispatch_pow(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
12490
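// [Editor's note] Illustrative sketch, not generated code: the two parser
// signatures above select between the Tensor and Scalar ATen overloads.
//
//   at::Tensor t = at::arange(3, at::kFloat);
//   at::Tensor a = t.pow(2);   // pow.Tensor_Scalar
//   at::Tensor b = t.pow(t);   // pow.Tensor_Tensor
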
// pow_
static PyObject * THPVariable_pow_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "pow_(Tensor exponent)",
    "pow_(Scalar exponent)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)

      auto dispatch_pow_ = [](const at::Tensor & self, const at::Tensor & exponent) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.pow_(exponent);
      };
      return wrap(dispatch_pow_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)

      auto dispatch_pow_ = [](const at::Tensor & self, const at::Scalar & exponent) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.pow_(exponent);
      };
      return wrap(dispatch_pow_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// prelu
static PyObject * THPVariable_prelu(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "prelu(Tensor weight)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::prelu(Tensor self, Tensor weight) -> Tensor

  auto dispatch_prelu = [](const at::Tensor & self, const at::Tensor & weight) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.prelu(weight);
  };
  return wrap(dispatch_prelu(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// prod
static PyObject * THPVariable_prod(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "prod(*, ScalarType? dtype=None)",
    "prod(int64_t dim, bool keepdim=False, *, ScalarType? dtype=None)",
    "prod(Dimname dim, bool keepdim=False, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_prod = [](const at::Tensor & self, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.prod(dtype);
      };
      return wrap(dispatch_prod(self, _r.scalartypeOptional(0)));
    }
    case 1: {
      // aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_prod = [](const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.prod(dim, keepdim, dtype);
      };
      return wrap(dispatch_prod(self, _r.toInt64(0), _r.toBool(1), _r.scalartypeOptional(2)));
    }
    case 2: {
      // aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_prod = [](const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.prod(dim, keepdim, dtype);
      };
      return wrap(dispatch_prod(self, _r.dimname(0), _r.toBool(1), _r.scalartypeOptional(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

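// [Editor's note] Illustrative sketch, not generated code: the three prod
// overloads reduce over all elements, an integer dim, or a named dim.
//
//   at::Tensor m = at::ones({2, 3});
//   at::Tensor total = m.prod();                                  // scalar tensor
//   at::Tensor cols  = m.prod(0, /*keepdim=*/false, at::kDouble); // shape [3], double accumulator
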
// put
static PyObject * THPVariable_put(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "put(Tensor index, Tensor source, bool accumulate=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor

  auto dispatch_put = [](const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.put(index, source, accumulate);
  };
  return wrap(dispatch_put(self, _r.tensor(0), _r.tensor(1), _r.toBool(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// put_
static PyObject * THPVariable_put_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "put_(Tensor index, Tensor source, bool accumulate=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)

  auto dispatch_put_ = [](const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.put_(index, source, accumulate);
  };
  return wrap(dispatch_put_(self, _r.tensor(0), _r.tensor(1), _r.toBool(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// q_per_channel_axis
static PyObject * THPVariable_q_per_channel_axis(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "q_per_channel_axis");
  }
  // aten::q_per_channel_axis(Tensor self) -> int

  auto dispatch_q_per_channel_axis = [](const at::Tensor & self) -> int64_t {
    pybind11::gil_scoped_release no_gil;
    return self.q_per_channel_axis();
  };
  return wrap(dispatch_q_per_channel_axis(self));
  END_HANDLE_TH_ERRORS
}

// q_per_channel_scales
static PyObject * THPVariable_q_per_channel_scales(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "q_per_channel_scales");
  }
  // aten::q_per_channel_scales(Tensor self) -> Tensor

  auto dispatch_q_per_channel_scales = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.q_per_channel_scales();
  };
  return wrap(dispatch_q_per_channel_scales(self));
  END_HANDLE_TH_ERRORS
}

// q_per_channel_zero_points
static PyObject * THPVariable_q_per_channel_zero_points(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "q_per_channel_zero_points");
  }
  // aten::q_per_channel_zero_points(Tensor self) -> Tensor

  auto dispatch_q_per_channel_zero_points = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.q_per_channel_zero_points();
  };
  return wrap(dispatch_q_per_channel_zero_points(self));
  END_HANDLE_TH_ERRORS
}

// q_scale
static PyObject * THPVariable_q_scale(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "q_scale");
  }
  // aten::q_scale(Tensor self) -> float

  auto dispatch_q_scale = [](const at::Tensor & self) -> double {
    pybind11::gil_scoped_release no_gil;
    return self.q_scale();
  };
  return wrap(dispatch_q_scale(self));
  END_HANDLE_TH_ERRORS
}

// q_zero_point
static PyObject * THPVariable_q_zero_point(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "q_zero_point");
  }
  // aten::q_zero_point(Tensor self) -> int

  auto dispatch_q_zero_point = [](const at::Tensor & self) -> int64_t {
    pybind11::gil_scoped_release no_gil;
    return self.q_zero_point();
  };
  return wrap(dispatch_q_zero_point(self));
  END_HANDLE_TH_ERRORS
}

// qr
static PyObject * THPVariable_qr(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("qr");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "qr(bool some=True)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)

  auto dispatch_qr = [](const at::Tensor & self, bool some) -> ::std::tuple<at::Tensor,at::Tensor> {
    pybind11::gil_scoped_release no_gil;
    return self.qr(some);
  };
  return wrap(NamedTuple, dispatch_qr(self, _r.toBool(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

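// [Editor's note] Illustrative sketch, not generated code: Python sees the
// result as a named tuple (Q, R) via get_namedtuple("qr"); in C++ it is a
// plain std::tuple. (Newer code generally prefers torch.linalg.qr.)
//
//   at::Tensor a = at::rand({4, 3});
//   at::Tensor q, r;
//   std::tie(q, r) = a.qr(/*some=*/true);   // q: [4, 3], r: [3, 3]
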
// qscheme
static PyObject * THPVariable_qscheme(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "qscheme");
  }
  // aten::qscheme(Tensor self) -> QScheme

  auto dispatch_qscheme = [](const at::Tensor & self) -> at::QScheme {
    pybind11::gil_scoped_release no_gil;
    return self.qscheme();
  };
  return wrap(dispatch_qscheme(self));
  END_HANDLE_TH_ERRORS
}

// quantile
static PyObject * THPVariable_quantile(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "quantile(Tensor q, int64_t? dim=None, bool keepdim=False, *, c10::string_view interpolation=\"linear\")",
    "quantile(double q, int64_t? dim=None, bool keepdim=False, *, c10::string_view interpolation=\"linear\")",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor

      auto dispatch_quantile = [](const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.quantile(q, dim, keepdim, interpolation);
      };
      return wrap(dispatch_quantile(self, _r.tensor(0), _r.toInt64Optional(1), _r.toBool(2), _r.stringView(3)));
    }
    case 1: {
      // aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor

      auto dispatch_quantile = [](const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.quantile(q, dim, keepdim, interpolation);
      };
      return wrap(dispatch_quantile(self, _r.toDouble(0), _r.toInt64Optional(1), _r.toBool(2), _r.stringView(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

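// [Editor's note] Illustrative sketch, not generated code (assumes a build
// where quantile takes the `interpolation` argument, as in the schema above):
//
//   at::Tensor t = at::rand({10});
//   at::Tensor med = t.quantile(0.5, /*dim=*/c10::nullopt,
//                               /*keepdim=*/false, "linear");   // median
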
// rad2deg
static PyObject * THPVariable_rad2deg(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "rad2deg");
  }
  // aten::rad2deg(Tensor self) -> Tensor

  auto dispatch_rad2deg = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.rad2deg();
  };
  return wrap(dispatch_rad2deg(self));
  END_HANDLE_TH_ERRORS
}

// rad2deg_
static PyObject * THPVariable_rad2deg_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "rad2deg_");
  }
  // aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_rad2deg_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.rad2deg_();
  };
  return wrap(dispatch_rad2deg_(self));
  END_HANDLE_TH_ERRORS
}

// random_
static PyObject * THPVariable_random_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "random_(*, Generator? generator=None)",
    "random_(int64_t from, int64_t? to, *, Generator? generator=None)",
    "random_(int64_t to, *, Generator? generator=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)

      auto dispatch_random_ = [](const at::Tensor & self, c10::optional<at::Generator> generator) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.random_(generator);
      };
      return wrap(dispatch_random_(self, _r.generator(0)));
    }
    case 1: {
      // aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)

      auto dispatch_random_ = [](const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.random_(from, to, generator);
      };
      return wrap(dispatch_random_(self, _r.toInt64(0), _r.toInt64Optional(1), _r.generator(2)));
    }
    case 2: {
      // aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)

      auto dispatch_random_ = [](const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.random_(to, generator);
      };
      return wrap(dispatch_random_(self, _r.toInt64(0), _r.generator(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

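// [Editor's note] Illustrative sketch, not generated code: the three
// signatures above cover full-range, [from, to) and [0, to) sampling.
//
//   at::Tensor t = at::empty({5});
//   t.random_(0, 10);   // integers drawn uniformly from [0, 10)
//   t.random_();        // full range representable by the dtype
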
// ravel
static PyObject * THPVariable_ravel(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "ravel");
  }
  // aten::ravel(Tensor(a) self) -> Tensor(a)

  auto dispatch_ravel = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.ravel();
  };
  return wrap(dispatch_ravel(self));
  END_HANDLE_TH_ERRORS
}

// reciprocal
static PyObject * THPVariable_reciprocal(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "reciprocal");
  }
  // aten::reciprocal(Tensor self) -> Tensor

  auto dispatch_reciprocal = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.reciprocal();
  };
  return wrap(dispatch_reciprocal(self));
  END_HANDLE_TH_ERRORS
}

// reciprocal_
static PyObject * THPVariable_reciprocal_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "reciprocal_");
  }
  // aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_reciprocal_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.reciprocal_();
  };
  return wrap(dispatch_reciprocal_(self));
  END_HANDLE_TH_ERRORS
}

// record_stream
static PyObject * THPVariable_record_stream(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "record_stream(Stream s)",
  }, /*traceable=*/false);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::record_stream(Tensor(a!) self, Stream s) -> ()

  auto dispatch_record_stream = [](const at::Tensor & self, at::Stream s) -> void {
    pybind11::gil_scoped_release no_gil;
    self.record_stream(s);
  };
  dispatch_record_stream(self, _r.stream(0));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// refine_names
static PyObject * THPVariable_refine_names(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "refine_names(DimnameList names)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)

  auto dispatch_refine_names = [](const at::Tensor & self, at::DimnameList names) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.refine_names(names);
  };
  return wrap(dispatch_refine_names(self, _r.dimnamelist(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// relu
static PyObject * THPVariable_relu(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "relu");
  }
  // aten::relu(Tensor self) -> Tensor

  auto dispatch_relu = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.relu();
  };
  return wrap(dispatch_relu(self));
  END_HANDLE_TH_ERRORS
}

// relu_
static PyObject * THPVariable_relu_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "relu_");
  }
  // aten::relu_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_relu_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.relu_();
  };
  return wrap(dispatch_relu_(self));
  END_HANDLE_TH_ERRORS
}

// remainder
static PyObject * THPVariable_remainder(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "remainder(Tensor other)",
    "remainder(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor

      auto dispatch_remainder = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.remainder(other);
      };
      return wrap(dispatch_remainder(self, _r.tensor(0)));
    }
    case 1: {
      // aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor

      auto dispatch_remainder = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.remainder(other);
      };
      return wrap(dispatch_remainder(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// remainder_
static PyObject * THPVariable_remainder_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "remainder_(Tensor other)",
    "remainder_(Scalar other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)

      auto dispatch_remainder_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.remainder_(other);
      };
      return wrap(dispatch_remainder_(self, _r.tensor(0)));
    }
    case 1: {
      // aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)

      auto dispatch_remainder_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.remainder_(other);
      };
      return wrap(dispatch_remainder_(self, _r.scalar(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// rename
static PyObject * THPVariable_rename(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "rename(DimnameList? names)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
  auto __names = _r.toDimnameListOptional(0);
  c10::optional<DimnameList> names = __names ? c10::make_optional(DimnameList(__names.value())) : c10::nullopt;
  auto dispatch_rename = [](const at::Tensor & self, c10::optional<at::DimnameList> names) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.rename(names);
  };
  return wrap(dispatch_rename(self, names));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// rename_
static PyObject * THPVariable_rename_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "rename_(DimnameList? names)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
  auto __names = _r.toDimnameListOptional(0);
  c10::optional<DimnameList> names = __names ? c10::make_optional(DimnameList(__names.value())) : c10::nullopt;
  auto dispatch_rename_ = [](const at::Tensor & self, c10::optional<at::DimnameList> names) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.rename_(names);
  };
  return wrap(dispatch_rename_(self, names));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// renorm
static PyObject * THPVariable_renorm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "renorm(Scalar p, int64_t dim, Scalar maxnorm)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor

  auto dispatch_renorm = [](const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.renorm(p, dim, maxnorm);
  };
  return wrap(dispatch_renorm(self, _r.scalar(0), _r.toInt64(1), _r.scalar(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// renorm_
static PyObject * THPVariable_renorm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "renorm_(Scalar p, int64_t dim, Scalar maxnorm)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)

  auto dispatch_renorm_ = [](const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.renorm_(p, dim, maxnorm);
  };
  return wrap(dispatch_renorm_(self, _r.scalar(0), _r.toInt64(1), _r.scalar(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// repeat
static PyObject * THPVariable_repeat(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "repeat(SymIntArrayRef repeats)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::repeat(Tensor self, SymInt[] repeats) -> Tensor

  auto dispatch_repeat = [](const at::Tensor & self, c10::SymIntArrayRef repeats) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.repeat_symint(repeats);
  };
  return wrap(dispatch_repeat(self, _r.symintlist(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

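// [Editor's note] Illustrative sketch, not generated code: unlike expand,
// repeat copies data and can add leading dimensions.
//
//   at::Tensor t = at::ones({2});
//   at::Tensor r = t.repeat({3, 2});   // shape [3, 4]
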
// repeat_interleave
static PyObject * THPVariable_repeat_interleave(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "repeat_interleave(Tensor repeats, int64_t? dim=None, *, int64_t? output_size=None)",
    "repeat_interleave(SymInt repeats, int64_t? dim=None, *, int64_t? output_size=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor

      auto dispatch_repeat_interleave = [](const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.repeat_interleave(repeats, dim, output_size);
      };
      return wrap(dispatch_repeat_interleave(self, _r.tensor(0), _r.toInt64Optional(1), _r.toInt64Optional(2)));
    }
    case 1: {
      // aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor

      auto dispatch_repeat_interleave = [](const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.repeat_interleave_symint(repeats, dim, output_size);
      };
      return wrap(dispatch_repeat_interleave(self, _r.toSymInt(0), _r.toInt64Optional(1), _r.toInt64Optional(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// reshape
static PyObject * THPVariable_reshape(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "reshape(SymIntArrayRef shape)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)

  auto dispatch_reshape = [](const at::Tensor & self, c10::SymIntArrayRef shape) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.reshape_symint(shape);
  };
  return wrap(dispatch_reshape(self, _r.symintlist(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// reshape_as
static PyObject * THPVariable_reshape_as(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "reshape_as(Tensor other)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)

  auto dispatch_reshape_as = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.reshape_as(other);
  };
  return wrap(dispatch_reshape_as(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// resize_
static PyObject * THPVariable_resize_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "resize_(SymIntArrayRef size, *, MemoryFormat? memory_format=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)

  auto dispatch_resize_ = [](const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.resize__symint(size, memory_format);
  };
  return wrap(dispatch_resize_(self, _r.symintlist(0), _r.memoryformatOptional(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// resize_as_
static PyObject * THPVariable_resize_as_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "resize_as_(Tensor the_template, *, MemoryFormat? memory_format=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)

  auto dispatch_resize_as_ = [](const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.resize_as_(the_template, memory_format);
  };
  return wrap(dispatch_resize_as_(self, _r.tensor(0), _r.memoryformatOptional(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// resize_as_sparse_
static PyObject * THPVariable_resize_as_sparse_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "resize_as_sparse_(Tensor the_template)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)

  auto dispatch_resize_as_sparse_ = [](const at::Tensor & self, const at::Tensor & the_template) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.resize_as_sparse_(the_template);
  };
  return wrap(dispatch_resize_as_sparse_(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// resolve_conj
static PyObject * THPVariable_resolve_conj(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "resolve_conj");
  }
  // aten::resolve_conj(Tensor(a) self) -> Tensor(a)

  auto dispatch_resolve_conj = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.resolve_conj();
  };
  return wrap(dispatch_resolve_conj(self));
  END_HANDLE_TH_ERRORS
}

// resolve_neg
static PyObject * THPVariable_resolve_neg(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "resolve_neg");
  }
  // aten::resolve_neg(Tensor(a) self) -> Tensor(a)

  auto dispatch_resolve_neg = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.resolve_neg();
  };
  return wrap(dispatch_resolve_neg(self));
  END_HANDLE_TH_ERRORS
}

// retain_grad
static PyObject * THPVariable_retain_grad(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "retain_grad");
  }
  // aten::retain_grad(Tensor(a!) self) -> ()

  auto dispatch_retain_grad = [](const at::Tensor & self) -> void {
    pybind11::gil_scoped_release no_gil;
    self.retain_grad();
  };
  dispatch_retain_grad(self);
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// roll
static PyObject * THPVariable_roll(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "roll(IntArrayRef[1] shifts, IntArrayRef[1] dims=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor

  auto dispatch_roll = [](const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.roll(shifts, dims);
  };
  return wrap(dispatch_roll(self, _r.intlist(0), _r.intlist(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// rot90
static PyObject * THPVariable_rot90(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "rot90(int64_t k=1, IntArrayRef dims={0,1})",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor

  auto dispatch_rot90 = [](const at::Tensor & self, int64_t k, at::IntArrayRef dims) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.rot90(k, dims);
  };
  return wrap(dispatch_rot90(self, _r.toInt64(0), _r.intlist(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// round
static PyObject * THPVariable_round(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "round()",
    "round(*, int64_t decimals)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::round(Tensor self) -> Tensor

      auto dispatch_round = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.round();
      };
      return wrap(dispatch_round(self));
    }
    case 1: {
      // aten::round.decimals(Tensor self, *, int decimals) -> Tensor

      auto dispatch_round = [](const at::Tensor & self, int64_t decimals) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.round(decimals);
      };
      return wrap(dispatch_round(self, _r.toInt64(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// round_
static PyObject * THPVariable_round_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "round_()",
    "round_(*, int64_t decimals)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::round_(Tensor(a!) self) -> Tensor(a!)

      auto dispatch_round_ = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.round_();
      };
      return wrap(dispatch_round_(self));
    }
    case 1: {
      // aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)

      auto dispatch_round_ = [](const at::Tensor & self, int64_t decimals) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.round_(decimals);
      };
      return wrap(dispatch_round_(self, _r.toInt64(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// row_indices
static PyObject * THPVariable_row_indices(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "row_indices");
  }
  // aten::row_indices(Tensor(a) self) -> Tensor(a)

  auto dispatch_row_indices = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.row_indices();
  };
  return wrap(dispatch_row_indices(self));
  END_HANDLE_TH_ERRORS
}

// rsqrt
static PyObject * THPVariable_rsqrt(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "rsqrt");
  }
  // aten::rsqrt(Tensor self) -> Tensor

  auto dispatch_rsqrt = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.rsqrt();
  };
  return wrap(dispatch_rsqrt(self));
  END_HANDLE_TH_ERRORS
}

// rsqrt_
static PyObject * THPVariable_rsqrt_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "rsqrt_");
  }
  // aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_rsqrt_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.rsqrt_();
  };
  return wrap(dispatch_rsqrt_(self));
  END_HANDLE_TH_ERRORS
}

// scatter
static PyObject * THPVariable_scatter(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "scatter(int64_t dim, Tensor index, Tensor src)",
    "scatter(int64_t dim, Tensor index, Tensor src, *, c10::string_view reduce)",
    "scatter(Dimname dim, Tensor index, Tensor src)",
    "scatter(int64_t dim, Tensor index, Scalar value)",
    "scatter(int64_t dim, Tensor index, Scalar value, *, c10::string_view reduce)",
    "scatter(Dimname dim, Tensor index, Scalar value)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor

      auto dispatch_scatter = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.scatter(dim, index, src);
      };
      return wrap(dispatch_scatter(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2)));
    }
    case 1: {
      // aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor

      auto dispatch_scatter = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.scatter(dim, index, src, reduce);
      };
      return wrap(dispatch_scatter(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2), _r.stringView(3)));
    }
    case 2: {
      // aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor

      auto dispatch_scatter = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.scatter(dim, index, src);
      };
      return wrap(dispatch_scatter(self, _r.dimname(0), _r.tensor(1), _r.tensor(2)));
    }
    case 3: {
      // aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor

      auto dispatch_scatter = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.scatter(dim, index, value);
      };
      return wrap(dispatch_scatter(self, _r.toInt64(0), _r.tensor(1), _r.scalar(2)));
    }
    case 4: {
      // aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor

      auto dispatch_scatter = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.scatter(dim, index, value, reduce);
      };
      return wrap(dispatch_scatter(self, _r.toInt64(0), _r.tensor(1), _r.scalar(2), _r.stringView(3)));
    }
    case 5: {
      // aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor

      auto dispatch_scatter = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.scatter(dim, index, value);
      };
      return wrap(dispatch_scatter(self, _r.dimname(0), _r.tensor(1), _r.scalar(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

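// [Editor's note] Illustrative sketch, not generated code: scatter writes
// src values into a copy of self at positions given by index along dim;
// scatter_ below is the in-place variant.
//
//   at::Tensor base  = at::zeros({4});
//   at::Tensor index = at::arange(2);   // int64 indices {0, 1}
//   at::Tensor src   = at::ones({2});
//   at::Tensor out   = base.scatter(0, index, src);   // {1, 1, 0, 0}
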
13748\
13749// scatter_
13750static PyObject * THPVariable_scatter_(PyObject* self_, PyObject* args, PyObject* kwargs)
13751{
13752 HANDLE_TH_ERRORS
13753 const Tensor& self = THPVariable_Unpack(self_);
13754 static PythonArgParser parser({
13755 "scatter_(int64_t dim, Tensor index, Tensor src)",
13756 "scatter_(int64_t dim, Tensor index, Tensor src, *, c10::string_view reduce)",
13757 "scatter_(int64_t dim, Tensor index, Scalar value)",
13758 "scatter_(int64_t dim, Tensor index, Scalar value, *, c10::string_view reduce)",
13759 }, /*traceable=*/true);
13760
13761 ParsedArgs<4> parsed_args;
13762 auto _r = parser.parse(self_, args, kwargs, parsed_args);
13763 if(_r.has_torch_function()) {
13764 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
13765 }
13766 switch (_r.idx) {
13767 case 0: {
13768 // aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
13769
13770 auto dispatch_scatter_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) -> at::Tensor {
13771 pybind11::gil_scoped_release no_gil;
13772 return self.scatter_(dim, index, src);
13773 };
13774 return wrap(dispatch_scatter_(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2)));
13775 }
13776 case 1: {
13777 // aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
13778
13779 auto dispatch_scatter_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) -> at::Tensor {
13780 pybind11::gil_scoped_release no_gil;
13781 return self.scatter_(dim, index, src, reduce);
13782 };
13783 return wrap(dispatch_scatter_(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2), _r.stringView(3)));
13784 }
13785 case 2: {
13786 // aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
13787
13788 auto dispatch_scatter_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) -> at::Tensor {
13789 pybind11::gil_scoped_release no_gil;
13790 return self.scatter_(dim, index, value);
13791 };
13792 return wrap(dispatch_scatter_(self, _r.toInt64(0), _r.tensor(1), _r.scalar(2)));
13793 }
13794 case 3: {
13795 // aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
13796
13797 auto dispatch_scatter_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) -> at::Tensor {
13798 pybind11::gil_scoped_release no_gil;
13799 return self.scatter_(dim, index, value, reduce);
13800 };
13801 return wrap(dispatch_scatter_(self, _r.toInt64(0), _r.tensor(1), _r.scalar(2), _r.stringView(3)));
13802 }
13803 }
13804 Py_RETURN_NONE;
13805 END_HANDLE_TH_ERRORS
13806}
13807
13808\
13809// scatter_add
13810static PyObject * THPVariable_scatter_add(PyObject* self_, PyObject* args, PyObject* kwargs)
13811{
13812 HANDLE_TH_ERRORS
13813 const Tensor& self = THPVariable_Unpack(self_);
13814 static PythonArgParser parser({
13815 "scatter_add(int64_t dim, Tensor index, Tensor src)",
13816 "scatter_add(Dimname dim, Tensor index, Tensor src)",
13817 }, /*traceable=*/true);
13818
13819 ParsedArgs<3> parsed_args;
13820 auto _r = parser.parse(self_, args, kwargs, parsed_args);
13821 if(_r.has_torch_function()) {
13822 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
13823 }
13824 switch (_r.idx) {
13825 case 0: {
13826 // aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
13827
13828 auto dispatch_scatter_add = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) -> at::Tensor {
13829 pybind11::gil_scoped_release no_gil;
13830 return self.scatter_add(dim, index, src);
13831 };
13832 return wrap(dispatch_scatter_add(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2)));
13833 }
13834 case 1: {
13835 // aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
13836
13837 auto dispatch_scatter_add = [](const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) -> at::Tensor {
13838 pybind11::gil_scoped_release no_gil;
13839 return self.scatter_add(dim, index, src);
13840 };
13841 return wrap(dispatch_scatter_add(self, _r.dimname(0), _r.tensor(1), _r.tensor(2)));
13842 }
13843 }
13844 Py_RETURN_NONE;
13845 END_HANDLE_TH_ERRORS
13846}
13847
13848// scatter_add_
13849static PyObject * THPVariable_scatter_add_(PyObject* self_, PyObject* args, PyObject* kwargs)
13850{
13851 HANDLE_TH_ERRORS
13852 const Tensor& self = THPVariable_Unpack(self_);
13853 static PythonArgParser parser({
13854 "scatter_add_(int64_t dim, Tensor index, Tensor src)",
13855 }, /*traceable=*/true);
13856
13857 ParsedArgs<3> parsed_args;
13858 auto _r = parser.parse(self_, args, kwargs, parsed_args);
13859 if(_r.has_torch_function()) {
13860 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
13861 }
13862 // aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
13863
13864 auto dispatch_scatter_add_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) -> at::Tensor {
13865 pybind11::gil_scoped_release no_gil;
13866 return self.scatter_add_(dim, index, src);
13867 };
13868 return wrap(dispatch_scatter_add_(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2)));
13869 Py_RETURN_NONE;
13870 END_HANDLE_TH_ERRORS
13871}

// scatter_reduce
static PyObject * THPVariable_scatter_reduce(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "scatter_reduce(int64_t dim, Tensor index, Tensor src, c10::string_view reduce, *, bool include_self=True)",
  }, /*traceable=*/true);

  ParsedArgs<5> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor

  auto dispatch_scatter_reduce = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.scatter_reduce(dim, index, src, reduce, include_self);
  };
  return wrap(dispatch_scatter_reduce(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2), _r.stringView(3), _r.toBool(4)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// scatter_reduce_
static PyObject * THPVariable_scatter_reduce_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "scatter_reduce_(int64_t dim, Tensor index, Tensor src, c10::string_view reduce, *, bool include_self=True)",
  }, /*traceable=*/true);

  ParsedArgs<5> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)

  auto dispatch_scatter_reduce_ = [](const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.scatter_reduce_(dim, index, src, reduce, include_self);
  };
  return wrap(dispatch_scatter_reduce_(self, _r.toInt64(0), _r.tensor(1), _r.tensor(2), _r.stringView(3), _r.toBool(4)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
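
// Both scatter_reduce bindings take a reduce string ("sum", "prod", "mean",
// "amax", or "amin"); the string is validated in ATen, not in this wrapper.
// Illustrative Python call:
//   >>> x = torch.ones(3)
//   >>> x.scatter_reduce(0, torch.tensor([0, 0]), torch.tensor([2., 3.]), "amax")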

// select
static PyObject * THPVariable_select(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "select(Dimname dim, int64_t index)",
    "select(int64_t dim, SymInt index)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)

      auto dispatch_select = [](const at::Tensor & self, at::Dimname dim, int64_t index) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.select(dim, index);
      };
      return wrap(dispatch_select(self, _r.dimname(0), _r.toInt64(1)));
    }
    case 1: {
      // aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)

      auto dispatch_select = [](const at::Tensor & self, int64_t dim, c10::SymInt index) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.select_symint(dim, index);
      };
      return wrap(dispatch_select(self, _r.toInt64(0), _r.toSymInt(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
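
// The Dimname signature is listed first, so a string dim binds to it while a
// plain int falls through to the SymInt overload (select_symint keeps the
// index symbolic under tracing). Illustrative Python calls:
//   >>> x = torch.randn(2, 3)
//   >>> x.select(1, 0)                                       # int dim
//   >>> torch.randn(2, 3, names=('N', 'C')).select('C', 0)   # named dim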

// select_scatter
static PyObject * THPVariable_select_scatter(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "select_scatter(Tensor src, int64_t dim, SymInt index)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor

  auto dispatch_select_scatter = [](const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.select_scatter_symint(src, dim, index);
  };
  return wrap(dispatch_select_scatter(self, _r.tensor(0), _r.toInt64(1), _r.toSymInt(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
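
// Illustrative Python call: writes `src` into a copy of `self` at the slice
// that select(dim, index) would view (shapes assumed to match):
//   >>> base = torch.zeros(2, 3)
//   >>> base.select_scatter(torch.ones(3), 0, 0)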

// sgn
static PyObject * THPVariable_sgn(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sgn");
  }
  // aten::sgn(Tensor self) -> Tensor

  auto dispatch_sgn = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sgn();
  };
  return wrap(dispatch_sgn(self));
  END_HANDLE_TH_ERRORS
}

// sgn_
static PyObject * THPVariable_sgn_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sgn_");
  }
  // aten::sgn_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_sgn_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sgn_();
  };
  return wrap(dispatch_sgn_(self));
  END_HANDLE_TH_ERRORS
}
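
// sgn/sgn_ use the argument-free binding shape shared by the unary methods
// that follow (sigmoid, sign, signbit, sin, sinc, sinh, ...): no
// PythonArgParser, just a torch-function check, a GIL release, and a direct
// ATen call. Illustrative Python call (complex input assumed for sgn):
//   >>> torch.tensor([3+4j, 0j]).sgn()   # z / |z|; zero stays zero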

// sigmoid
static PyObject * THPVariable_sigmoid(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sigmoid");
  }
  // aten::sigmoid(Tensor self) -> Tensor

  auto dispatch_sigmoid = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sigmoid();
  };
  return wrap(dispatch_sigmoid(self));
  END_HANDLE_TH_ERRORS
}

// sigmoid_
static PyObject * THPVariable_sigmoid_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sigmoid_");
  }
  // aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_sigmoid_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sigmoid_();
  };
  return wrap(dispatch_sigmoid_(self));
  END_HANDLE_TH_ERRORS
}

// sign
static PyObject * THPVariable_sign(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sign");
  }
  // aten::sign(Tensor self) -> Tensor

  auto dispatch_sign = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sign();
  };
  return wrap(dispatch_sign(self));
  END_HANDLE_TH_ERRORS
}

// sign_
static PyObject * THPVariable_sign_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sign_");
  }
  // aten::sign_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_sign_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sign_();
  };
  return wrap(dispatch_sign_(self));
  END_HANDLE_TH_ERRORS
}

// signbit
static PyObject * THPVariable_signbit(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "signbit");
  }
  // aten::signbit(Tensor self) -> Tensor

  auto dispatch_signbit = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.signbit();
  };
  return wrap(dispatch_signbit(self));
  END_HANDLE_TH_ERRORS
}

// sin
static PyObject * THPVariable_sin(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sin");
  }
  // aten::sin(Tensor self) -> Tensor

  auto dispatch_sin = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sin();
  };
  return wrap(dispatch_sin(self));
  END_HANDLE_TH_ERRORS
}

// sin_
static PyObject * THPVariable_sin_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sin_");
  }
  // aten::sin_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_sin_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sin_();
  };
  return wrap(dispatch_sin_(self));
  END_HANDLE_TH_ERRORS
}

// sinc
static PyObject * THPVariable_sinc(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sinc");
  }
  // aten::sinc(Tensor self) -> Tensor

  auto dispatch_sinc = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sinc();
  };
  return wrap(dispatch_sinc(self));
  END_HANDLE_TH_ERRORS
}

// sinc_
static PyObject * THPVariable_sinc_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sinc_");
  }
  // aten::sinc_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_sinc_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sinc_();
  };
  return wrap(dispatch_sinc_(self));
  END_HANDLE_TH_ERRORS
}

// sinh
static PyObject * THPVariable_sinh(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sinh");
  }
  // aten::sinh(Tensor self) -> Tensor

  auto dispatch_sinh = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sinh();
  };
  return wrap(dispatch_sinh(self));
  END_HANDLE_TH_ERRORS
}

// sinh_
static PyObject * THPVariable_sinh_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sinh_");
  }
  // aten::sinh_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_sinh_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sinh_();
  };
  return wrap(dispatch_sinh_(self));
  END_HANDLE_TH_ERRORS
}

// slice_scatter
static PyObject * THPVariable_slice_scatter(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "slice_scatter(Tensor src, int64_t dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1)",
  }, /*traceable=*/true);

  ParsedArgs<5> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor

  auto dispatch_slice_scatter = [](const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.slice_scatter_symint(src, dim, start, end, step);
  };
  return wrap(dispatch_slice_scatter(self, _r.tensor(0), _r.toInt64(1), _r.toSymIntOptional(2), _r.toSymIntOptional(3), _r.toSymInt(4)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
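
// In slice_scatter, `start` and `end` parse as optional SymInts, so Python
// None becomes c10::nullopt and ATen's default slice bounds apply.
// Illustrative Python call (shapes assumed compatible):
//   >>> base = torch.zeros(4, 3)
//   >>> base.slice_scatter(torch.ones(2, 3), dim=0, start=1, end=3)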

// slogdet
static PyObject * THPVariable_slogdet(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("slogdet");
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "slogdet");
  }
  // aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)

  auto dispatch_slogdet = [](const at::Tensor & self) -> ::std::tuple<at::Tensor,at::Tensor> {
    pybind11::gil_scoped_release no_gil;
    return self.slogdet();
  };
  return wrap(NamedTuple, dispatch_slogdet(self));
  END_HANDLE_TH_ERRORS
}
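
// The (sign, logabsdet) pair is returned through the structseq type cached
// from get_namedtuple("slogdet") rather than a plain tuple. Illustrative:
//   >>> sign, logabsdet = torch.eye(3).slogdet()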

// smm
static PyObject * THPVariable_smm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "smm(Tensor mat2)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::smm(Tensor self, Tensor mat2) -> Tensor

  auto dispatch_smm = [](const at::Tensor & self, const at::Tensor & mat2) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.smm(mat2);
  };
  return wrap(dispatch_smm(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// softmax
static PyObject * THPVariable_softmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "softmax(int64_t dim, ScalarType? dtype=None)",
    "softmax(Dimname dim, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor

      auto dispatch_softmax = [](const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.softmax(dim, dtype);
      };
      return wrap(dispatch_softmax(self, _r.toInt64(0), _r.scalartypeOptional(1)));
    }
    case 1: {
      // aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_softmax = [](const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.softmax(dim, dtype);
      };
      return wrap(dispatch_softmax(self, _r.dimname(0), _r.scalartypeOptional(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// sort
static PyObject * THPVariable_sort(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("sort");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sort(*, bool? stable, int64_t dim=-1, bool descending=False)",
    "sort(int64_t dim=-1, bool descending=False)",
    "sort(*, bool? stable, Dimname dim, bool descending=False)",
    "sort(Dimname dim, bool descending=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)

      auto dispatch_sort = [](const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.sort(stable, dim, descending);
      };
      return wrap(NamedTuple, dispatch_sort(self, _r.toBoolOptional(0), _r.toInt64(1), _r.toBool(2)));
    }
    case 1: {
      // aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)

      auto dispatch_sort = [](const at::Tensor & self, int64_t dim, bool descending) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.sort(dim, descending);
      };
      return wrap(NamedTuple, dispatch_sort(self, _r.toInt64(0), _r.toBool(1)));
    }
    case 2: {
      // aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)

      auto dispatch_sort = [](const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.sort(stable, dim, descending);
      };
      return wrap(NamedTuple, dispatch_sort(self, _r.toBoolOptional(0), _r.dimname(1), _r.toBool(2)));
    }
    case 3: {
      // aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)

      auto dispatch_sort = [](const at::Tensor & self, at::Dimname dim, bool descending) -> ::std::tuple<at::Tensor,at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.sort(dim, descending);
      };
      return wrap(NamedTuple, dispatch_sort(self, _r.dimname(0), _r.toBool(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
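
// Four sort signatures separate stable vs. default sort and int vs. named
// dim; `stable` is keyword-only where present. Illustrative Python calls:
//   >>> values, indices = torch.tensor([3., 1., 2.]).sort()
//   >>> values, indices = torch.tensor([3., 1., 2.]).sort(stable=True)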

// sparse_dim
static PyObject * THPVariable_sparse_dim(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sparse_dim");
  }
  // aten::sparse_dim(Tensor self) -> int

  auto dispatch_sparse_dim = [](const at::Tensor & self) -> int64_t {
    pybind11::gil_scoped_release no_gil;
    return self.sparse_dim();
  };
  return wrap(dispatch_sparse_dim(self));
  END_HANDLE_TH_ERRORS
}

// sparse_mask
static PyObject * THPVariable_sparse_mask(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sparse_mask(Tensor mask)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::sparse_mask(Tensor self, Tensor mask) -> Tensor

  auto dispatch_sparse_mask = [](const at::Tensor & self, const at::Tensor & mask) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sparse_mask(mask);
  };
  return wrap(dispatch_sparse_mask(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// sparse_resize_
static PyObject * THPVariable_sparse_resize_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)

  auto dispatch_sparse_resize_ = [](const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sparse_resize_(size, sparse_dim, dense_dim);
  };
  return wrap(dispatch_sparse_resize_(self, _r.intlist(0), _r.toInt64(1), _r.toInt64(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// sparse_resize_and_clear_
static PyObject * THPVariable_sparse_resize_and_clear_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)

  auto dispatch_sparse_resize_and_clear_ = [](const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sparse_resize_and_clear_(size, sparse_dim, dense_dim);
  };
  return wrap(dispatch_sparse_resize_and_clear_(self, _r.intlist(0), _r.toInt64(1), _r.toInt64(2)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// split
static PyObject * THPVariable_split(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "split(SymInt split_size, int64_t dim=0)",
    "split(SymIntArrayRef split_size, int64_t dim=0)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]

      auto dispatch_split = [](const at::Tensor & self, c10::SymInt split_size, int64_t dim) -> ::std::vector<at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.split_symint(split_size, dim);
      };
      return wrap(dispatch_split(self, _r.toSymInt(0), _r.toInt64(1)));
    }
    case 1: {
      // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]

      auto dispatch_split = [](const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) -> ::std::vector<at::Tensor> {
        pybind11::gil_scoped_release no_gil;
        return self.split_symint(split_size, dim);
      };
      return wrap(dispatch_split(self, _r.symintlist(0), _r.toInt64(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
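
// A single int split size yields equal chunks (the last may be smaller); a
// list of sizes selects the SymIntArrayRef overload. Illustrative calls:
//   >>> torch.arange(5).split(2)        # sizes 2, 2, 1
//   >>> torch.arange(5).split([2, 3])   # sizes 2, 3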

// split_with_sizes
static PyObject * THPVariable_split_with_sizes(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "split_with_sizes(SymIntArrayRef split_sizes, int64_t dim=0)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]

  auto dispatch_split_with_sizes = [](const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) -> ::std::vector<at::Tensor> {
    pybind11::gil_scoped_release no_gil;
    return self.split_with_sizes_symint(split_sizes, dim);
  };
  return wrap(dispatch_split_with_sizes(self, _r.symintlist(0), _r.toInt64(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// sqrt
static PyObject * THPVariable_sqrt(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sqrt");
  }
  // aten::sqrt(Tensor self) -> Tensor

  auto dispatch_sqrt = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sqrt();
  };
  return wrap(dispatch_sqrt(self));
  END_HANDLE_TH_ERRORS
}

// sqrt_
static PyObject * THPVariable_sqrt_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "sqrt_");
  }
  // aten::sqrt_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_sqrt_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sqrt_();
  };
  return wrap(dispatch_sqrt_(self));
  END_HANDLE_TH_ERRORS
}

// square
static PyObject * THPVariable_square(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "square");
  }
  // aten::square(Tensor self) -> Tensor

  auto dispatch_square = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.square();
  };
  return wrap(dispatch_square(self));
  END_HANDLE_TH_ERRORS
}

// square_
static PyObject * THPVariable_square_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "square_");
  }
  // aten::square_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_square_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.square_();
  };
  return wrap(dispatch_square_(self));
  END_HANDLE_TH_ERRORS
}

// squeeze
static PyObject * THPVariable_squeeze(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "squeeze()",
    "squeeze(int64_t dim)",
    "squeeze(IntArrayRef dim)",
    "squeeze(Dimname dim)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::squeeze(Tensor(a) self) -> Tensor(a)

      auto dispatch_squeeze = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.squeeze();
      };
      return wrap(dispatch_squeeze(self));
    }
    case 1: {
      // aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)

      auto dispatch_squeeze = [](const at::Tensor & self, int64_t dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.squeeze(dim);
      };
      return wrap(dispatch_squeeze(self, _r.toInt64(0)));
    }
    case 2: {
      // aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)

      auto dispatch_squeeze = [](const at::Tensor & self, at::IntArrayRef dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.squeeze(dim);
      };
      return wrap(dispatch_squeeze(self, _r.intlist(0)));
    }
    case 3: {
      // aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)

      auto dispatch_squeeze = [](const at::Tensor & self, at::Dimname dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.squeeze(dim);
      };
      return wrap(dispatch_squeeze(self, _r.dimname(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
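
// squeeze resolves four schemas: all size-1 dims, one int dim, an int list
// of dims, or a named dim. Illustrative Python calls:
//   >>> torch.zeros(1, 3, 1).squeeze()        # shape (3,)
//   >>> torch.zeros(1, 3, 1).squeeze(0)       # shape (3, 1)
//   >>> torch.zeros(1, 3, 1).squeeze((0, 2))  # shape (3,)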

// squeeze_
static PyObject * THPVariable_squeeze_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "squeeze_()",
    "squeeze_(int64_t dim)",
    "squeeze_(IntArrayRef dim)",
    "squeeze_(Dimname dim)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::squeeze_(Tensor(a!) self) -> Tensor(a!)

      auto dispatch_squeeze_ = [](const at::Tensor & self) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.squeeze_();
      };
      return wrap(dispatch_squeeze_(self));
    }
    case 1: {
      // aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)

      auto dispatch_squeeze_ = [](const at::Tensor & self, int64_t dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.squeeze_(dim);
      };
      return wrap(dispatch_squeeze_(self, _r.toInt64(0)));
    }
    case 2: {
      // aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)

      auto dispatch_squeeze_ = [](const at::Tensor & self, at::IntArrayRef dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.squeeze_(dim);
      };
      return wrap(dispatch_squeeze_(self, _r.intlist(0)));
    }
    case 3: {
      // aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)

      auto dispatch_squeeze_ = [](const at::Tensor & self, at::Dimname dim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.squeeze_(dim);
      };
      return wrap(dispatch_squeeze_(self, _r.dimname(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// sspaddmm
static PyObject * THPVariable_sspaddmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sspaddmm(Scalar beta, Scalar alpha, Tensor mat1, Tensor mat2)|deprecated",
    "sspaddmm(Scalar beta, Tensor mat1, Tensor mat2)|deprecated",
    "sspaddmm(Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::sspaddmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor

      auto dispatch_sspaddmm = [](const at::Scalar & beta, const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & mat1, const at::Tensor & mat2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sspaddmm(mat1, mat2, beta, alpha);
      };
      return wrap(dispatch_sspaddmm(_r.scalar(0), self, _r.scalar(1), _r.tensor(2), _r.tensor(3)));
    }
    case 1: {
      // [deprecated] aten::sspaddmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor

      auto dispatch_sspaddmm = [](const at::Scalar & beta, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sspaddmm(mat1, mat2, beta, 1);
      };
      return wrap(dispatch_sspaddmm(_r.scalar(0), self, _r.tensor(1), _r.tensor(2)));
    }
    case 2: {
      // aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor

      auto dispatch_sspaddmm = [](const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sspaddmm(mat1, mat2, beta, alpha);
      };
      return wrap(dispatch_sspaddmm(self, _r.tensor(0), _r.tensor(1), _r.scalar(2), _r.scalar(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
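
// The two `|deprecated` sspaddmm signatures keep legacy positional
// (beta[, alpha]) call orders parsable; both remap onto the canonical
// mat1/mat2 form before dispatch. Illustrative current-form call, assuming a
// sparse `self` and `mat1` with a dense `mat2`:
//   >>> s = torch.eye(2).to_sparse()
//   >>> s.sspaddmm(torch.eye(2).to_sparse(), torch.ones(2, 2))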

// std
static PyObject * THPVariable_std(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "std(IntArrayRef[1]? dim, bool unbiased=True, bool keepdim=False)",
    "std(IntArrayRef[1]? dim=None, *, int64_t? correction=None, bool keepdim=False)",
    "std(bool unbiased=True)",
    "std(DimnameList[1] dim, bool unbiased=True, bool keepdim=False)",
    "std(DimnameList[1] dim, *, int64_t? correction=None, bool keepdim=False)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor

      auto dispatch_std = [](const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.std(dim, unbiased, keepdim);
      };
      return wrap(dispatch_std(self, _r.intlistOptional(0), _r.toBool(1), _r.toBool(2)));
    }
    case 1: {
      // aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor

      auto dispatch_std = [](const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.std(dim, correction, keepdim);
      };
      return wrap(dispatch_std(self, _r.intlistOptional(0), _r.toInt64Optional(1), _r.toBool(2)));
    }
    case 2: {
      // aten::std(Tensor self, bool unbiased=True) -> Tensor

      auto dispatch_std = [](const at::Tensor & self, bool unbiased) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.std(unbiased);
      };
      return wrap(dispatch_std(self, _r.toBool(0)));
    }
    case 3: {
      // aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor

      auto dispatch_std = [](const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.std(dim, unbiased, keepdim);
      };
      return wrap(dispatch_std(self, _r.dimnamelist(0), _r.toBool(1), _r.toBool(2)));
    }
    case 4: {
      // aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor

      auto dispatch_std = [](const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.std(dim, correction, keepdim);
      };
      return wrap(dispatch_std(self, _r.dimnamelist(0), _r.toInt64Optional(1), _r.toBool(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
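
// The `correction` schemas generalize the boolean `unbiased` ones:
// unbiased=True behaves like correction=1 and unbiased=False like
// correction=0. Illustrative Python calls:
//   >>> x = torch.randn(4, 5)
//   >>> x.std(dim=1, correction=0)
//   >>> x.std(unbiased=False)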

// stft
static PyObject * THPVariable_stft(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "stft(int64_t n_fft, int64_t? hop_length=None, int64_t? win_length=None, Tensor? window=None, bool center=True, c10::string_view pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None)",
    "stft(int64_t n_fft, int64_t? hop_length=None, int64_t? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None)",
  }, /*traceable=*/true);

  ParsedArgs<9> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor

      auto dispatch_stft = [](const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.stft(n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
      };
      return wrap(dispatch_stft(self, _r.toInt64(0), _r.toInt64Optional(1), _r.toInt64Optional(2), _r.optionalTensor(3), _r.toBool(4), _r.stringView(5), _r.toBool(6), _r.toBoolOptional(7), _r.toBoolOptional(8)));
    }
    case 1: {
      // aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor

      auto dispatch_stft = [](const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.stft(n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
      };
      return wrap(dispatch_stft(self, _r.toInt64(0), _r.toInt64Optional(1), _r.toInt64Optional(2), _r.optionalTensor(3), _r.toBool(4), _r.toBoolOptional(5), _r.toBoolOptional(6)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
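
// Both stft schemas share n_fft/hop_length/win_length/window; the first also
// carries the center/pad_mode pre-padding arguments and is tried first.
// Illustrative Python call:
//   >>> torch.randn(400).stft(64, hop_length=16, return_complex=True)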

// sub
static PyObject * THPVariable_sub(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sub(Scalar alpha, Tensor other)|deprecated",
    "sub(Tensor other, *, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::sub(Tensor self, Scalar alpha, Tensor other) -> Tensor

      auto dispatch_sub = [](const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sub(other, alpha);
      };
      return wrap(dispatch_sub(self, _r.scalar(0), _r.tensor(1)));
    }
    case 1: {
      // aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor

      auto dispatch_sub = [](const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sub(other, alpha);
      };
      return wrap(dispatch_sub(self, _r.tensor(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// sub_
static PyObject * THPVariable_sub_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sub_(Scalar alpha, Tensor other)|deprecated",
    "sub_(Tensor other, *, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // [deprecated] aten::sub_(Tensor(a!) self, Scalar alpha, Tensor other) -> Tensor(a!)

      auto dispatch_sub_ = [](const at::Tensor & self, const at::Scalar & alpha, const at::Tensor & other) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sub_(other, alpha);
      };
      return wrap(dispatch_sub_(self, _r.scalar(0), _r.tensor(1)));
    }
    case 1: {
      // aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)

      auto dispatch_sub_ = [](const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sub_(other, alpha);
      };
      return wrap(dispatch_sub_(self, _r.tensor(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// subtract
static PyObject * THPVariable_subtract(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "subtract(Tensor other, *, Scalar alpha=1)",
    "subtract(Scalar other, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor

      auto dispatch_subtract = [](const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.subtract(other, alpha);
      };
      return wrap(dispatch_subtract(self, _r.tensor(0), _r.scalar(1)));
    }
    case 1: {
      // aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor

      auto dispatch_subtract = [](const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.subtract(other, alpha);
      };
      return wrap(dispatch_subtract(self, _r.scalar(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// subtract_
static PyObject * THPVariable_subtract_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "subtract_(Tensor other, *, Scalar alpha=1)",
    "subtract_(Scalar other, Scalar alpha=1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)

      auto dispatch_subtract_ = [](const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.subtract_(other, alpha);
      };
      return wrap(dispatch_subtract_(self, _r.tensor(0), _r.scalar(1)));
    }
    case 1: {
      // aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)

      auto dispatch_subtract_ = [](const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.subtract_(other, alpha);
      };
      return wrap(dispatch_subtract_(self, _r.scalar(0), _r.scalar(1)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// sum
static PyObject * THPVariable_sum(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sum(*, ScalarType? dtype=None)",
    "sum(IntArrayRef[1]? dim, bool keepdim=False, *, ScalarType? dtype=None)",
    "sum(DimnameList[1] dim, bool keepdim=False, *, ScalarType? dtype=None)",
  }, /*traceable=*/true);

  ParsedArgs<3> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  switch (_r.idx) {
    case 0: {
      // aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_sum = [](const at::Tensor & self, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sum(dtype);
      };
      return wrap(dispatch_sum(self, _r.scalartypeOptional(0)));
    }
    case 1: {
      // aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_sum = [](const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sum(dim, keepdim, dtype);
      };
      return wrap(dispatch_sum(self, _r.intlistOptional(0), _r.toBool(1), _r.scalartypeOptional(2)));
    }
    case 2: {
      // aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor

      auto dispatch_sum = [](const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) -> at::Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.sum(dim, keepdim, dtype);
      };
      return wrap(dispatch_sum(self, _r.dimnamelist(0), _r.toBool(1), _r.scalartypeOptional(2)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
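
// sum accepts no dim (full reduction), an optional int list, or named dims;
// a `dtype` argument upcasts before accumulation. Illustrative Python calls:
//   >>> x = torch.ones(2, 3, dtype=torch.float16)
//   >>> x.sum(dtype=torch.float32)
//   >>> x.sum(dim=1, keepdim=True)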

// sum_to_size
static PyObject * THPVariable_sum_to_size(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "sum_to_size(IntArrayRef size)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::sum_to_size(Tensor self, int[] size) -> Tensor

  auto dispatch_sum_to_size = [](const at::Tensor & self, at::IntArrayRef size) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.sum_to_size(size);
  };
  return wrap(dispatch_sum_to_size(self, _r.intlist(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// svd
static PyObject * THPVariable_svd(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("svd");
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "svd(bool some=True, bool compute_uv=True)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)

  auto dispatch_svd = [](const at::Tensor & self, bool some, bool compute_uv) -> ::std::tuple<at::Tensor,at::Tensor,at::Tensor> {
    pybind11::gil_scoped_release no_gil;
    return self.svd(some, compute_uv);
  };
  return wrap(NamedTuple, dispatch_svd(self, _r.toBool(0), _r.toBool(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
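
// As with slogdet, the (U, S, V) triple is wrapped in the structseq fetched
// via get_namedtuple("svd"). Illustrative Python call:
//   >>> U, S, V = torch.randn(3, 3).svd()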

// swapaxes
static PyObject * THPVariable_swapaxes(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "swapaxes(int64_t axis0, int64_t axis1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)

  auto dispatch_swapaxes = [](const at::Tensor & self, int64_t axis0, int64_t axis1) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.swapaxes(axis0, axis1);
  };
  return wrap(dispatch_swapaxes(self, _r.toInt64(0), _r.toInt64(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// swapaxes_
static PyObject * THPVariable_swapaxes_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "swapaxes_(int64_t axis0, int64_t axis1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)

  auto dispatch_swapaxes_ = [](const at::Tensor & self, int64_t axis0, int64_t axis1) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.swapaxes_(axis0, axis1);
  };
  return wrap(dispatch_swapaxes_(self, _r.toInt64(0), _r.toInt64(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// swapdims
static PyObject * THPVariable_swapdims(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "swapdims(int64_t dim0, int64_t dim1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)

  auto dispatch_swapdims = [](const at::Tensor & self, int64_t dim0, int64_t dim1) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.swapdims(dim0, dim1);
  };
  return wrap(dispatch_swapdims(self, _r.toInt64(0), _r.toInt64(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// swapdims_
static PyObject * THPVariable_swapdims_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "swapdims_(int64_t dim0, int64_t dim1)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)

  auto dispatch_swapdims_ = [](const at::Tensor & self, int64_t dim0, int64_t dim1) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.swapdims_(dim0, dim1);
  };
  return wrap(dispatch_swapdims_(self, _r.toInt64(0), _r.toInt64(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// t
static PyObject * THPVariable_t(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "t");
  }
  // aten::t(Tensor(a) self) -> Tensor(a)

  auto dispatch_t = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.t();
  };
  return wrap(dispatch_t(self));
  END_HANDLE_TH_ERRORS
}

// t_
static PyObject * THPVariable_t_(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  if(check_has_torch_function(self_)) {
    return handle_torch_function(self_, "t_");
  }
  // aten::t_(Tensor(a!) self) -> Tensor(a!)

  auto dispatch_t_ = [](const at::Tensor & self) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.t_();
  };
  return wrap(dispatch_t_(self));
  END_HANDLE_TH_ERRORS
}

// take
static PyObject * THPVariable_take(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "take(Tensor index)",
  }, /*traceable=*/true);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::take(Tensor self, Tensor index) -> Tensor

  auto dispatch_take = [](const at::Tensor & self, const at::Tensor & index) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.take(index);
  };
  return wrap(dispatch_take(self, _r.tensor(0)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// take_along_dim
static PyObject * THPVariable_take_along_dim(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser({
    "take_along_dim(Tensor indices, int64_t? dim=None)",
  }, /*traceable=*/true);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(self_, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  // aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor

  auto dispatch_take_along_dim = [](const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) -> at::Tensor {
    pybind11::gil_scoped_release no_gil;
    return self.take_along_dim(indices, dim);
  };
  return wrap(dispatch_take_along_dim(self, _r.tensor(0), _r.toInt64Optional(1)));
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
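
// With dim=None, take_along_dim treats `self` as flattened; with an int dim,
// `indices` must broadcast against `self` on the remaining dims.
// Illustrative Python call:
//   >>> x = torch.tensor([[10., 30.], [20., 40.]])
//   >>> x.take_along_dim(torch.tensor([[0], [1]]), dim=1)   # [[10.], [40.]]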
15348
15349// tan
15350static PyObject * THPVariable_tan(PyObject* self_, PyObject* args)
15351{
15352 HANDLE_TH_ERRORS
15353 const Tensor& self = THPVariable_Unpack(self_);
15354 if(check_has_torch_function(self_)) {
15355 return handle_torch_function(self_, "tan");
15356 }
15357 // aten::tan(Tensor self) -> Tensor
15358
15359 auto dispatch_tan = [](const at::Tensor & self) -> at::Tensor {
15360 pybind11::gil_scoped_release no_gil;
15361 return self.tan();
15362 };
15363 return wrap(dispatch_tan(self));
15364 END_HANDLE_TH_ERRORS
15365}
15366
15367// tan_
15368static PyObject * THPVariable_tan_(PyObject* self_, PyObject* args)
15369{
15370 HANDLE_TH_ERRORS
15371 const Tensor& self = THPVariable_Unpack(self_);
15372 if(check_has_torch_function(self_)) {
15373 return handle_torch_function(self_, "tan_");
15374 }
15375 // aten::tan_(Tensor(a!) self) -> Tensor(a!)
15376
15377 auto dispatch_tan_ = [](const at::Tensor & self) -> at::Tensor {
15378 pybind11::gil_scoped_release no_gil;
15379 return self.tan_();
15380 };
15381 return wrap(dispatch_tan_(self));
15382 END_HANDLE_TH_ERRORS
15383}
15384
15385// tanh
15386static PyObject * THPVariable_tanh(PyObject* self_, PyObject* args)
15387{
15388 HANDLE_TH_ERRORS
15389 const Tensor& self = THPVariable_Unpack(self_);
15390 if(check_has_torch_function(self_)) {
15391 return handle_torch_function(self_, "tanh");
15392 }
15393 // aten::tanh(Tensor self) -> Tensor
15394
15395 auto dispatch_tanh = [](const at::Tensor & self) -> at::Tensor {
15396 pybind11::gil_scoped_release no_gil;
15397 return self.tanh();
15398 };
15399 return wrap(dispatch_tanh(self));
15400 END_HANDLE_TH_ERRORS
15401}
15402
15403// tanh_
15404static PyObject * THPVariable_tanh_(PyObject* self_, PyObject* args)
15405{
15406 HANDLE_TH_ERRORS
15407 const Tensor& self = THPVariable_Unpack(self_);
15408 if(check_has_torch_function(self_)) {
15409 return handle_torch_function(self_, "tanh_");
15410 }
15411 // aten::tanh_(Tensor(a!) self) -> Tensor(a!)
15412
15413 auto dispatch_tanh_ = [](const at::Tensor & self) -> at::Tensor {
15414 pybind11::gil_scoped_release no_gil;
15415 return self.tanh_();
15416 };
15417 return wrap(dispatch_tanh_(self));
15418 END_HANDLE_TH_ERRORS
15419}
15420
15422// tensor_split
15423static PyObject * THPVariable_tensor_split(PyObject* self_, PyObject* args, PyObject* kwargs)
15424{
15425 HANDLE_TH_ERRORS
15426 const Tensor& self = THPVariable_Unpack(self_);
15427 static PythonArgParser parser({
15428 "tensor_split(SymIntArrayRef indices, int64_t dim=0)",
15429 "tensor_split(Tensor tensor_indices_or_sections, int64_t dim=0)",
15430 "tensor_split(SymInt sections, int64_t dim=0)",
15431 }, /*traceable=*/true);
15432
15433 ParsedArgs<2> parsed_args;
15434 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15435 if(_r.has_torch_function()) {
15436 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15437 }
15438 switch (_r.idx) {
15439 case 0: {
15440 // aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
15441
15442 auto dispatch_tensor_split = [](const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) -> ::std::vector<at::Tensor> {
15443 pybind11::gil_scoped_release no_gil;
15444 return self.tensor_split_symint(indices, dim);
15445 };
15446 return wrap(dispatch_tensor_split(self, _r.symintlist(0), _r.toInt64(1)));
15447 }
15448 case 1: {
15449 // aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
15450
15451 auto dispatch_tensor_split = [](const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) -> ::std::vector<at::Tensor> {
15452 pybind11::gil_scoped_release no_gil;
15453 return self.tensor_split(tensor_indices_or_sections, dim);
15454 };
15455 return wrap(dispatch_tensor_split(self, _r.tensor(0), _r.toInt64(1)));
15456 }
15457 case 2: {
15458 // aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
15459
15460 auto dispatch_tensor_split = [](const at::Tensor & self, c10::SymInt sections, int64_t dim) -> ::std::vector<at::Tensor> {
15461 pybind11::gil_scoped_release no_gil;
15462 return self.tensor_split_symint(sections, dim);
15463 };
15464 return wrap(dispatch_tensor_split(self, _r.toSymInt(0), _r.toInt64(1)));
15465 }
15466 }
15467 Py_RETURN_NONE;
15468 END_HANDLE_TH_ERRORS
15469}
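// Illustrative (editorial) note: the three overloads above correspond to the
// following Python-level calls:
//   t.tensor_split(3)                     # sections: three nearly equal parts
//   t.tensor_split([2, 5])                # indices: split before 2 and before 5
//   t.tensor_split(torch.tensor([2, 5]))  # tensor_indices_or_sections variant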
15470
15471// tile
15472static PyObject * THPVariable_tile(PyObject* self_, PyObject* args, PyObject* kwargs)
15473{
15474 HANDLE_TH_ERRORS
15475 const Tensor& self = THPVariable_Unpack(self_);
15476 static PythonArgParser parser({
15477 "tile(IntArrayRef dims)",
15478 }, /*traceable=*/true);
15479
15480 ParsedArgs<1> parsed_args;
15481 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15482 if(_r.has_torch_function()) {
15483 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15484 }
15485 // aten::tile(Tensor self, int[] dims) -> Tensor
15486
15487 auto dispatch_tile = [](const at::Tensor & self, at::IntArrayRef dims) -> at::Tensor {
15488 pybind11::gil_scoped_release no_gil;
15489 return self.tile(dims);
15490 };
15491 return wrap(dispatch_tile(self, _r.intlist(0)));
15492 Py_RETURN_NONE;
15493 END_HANDLE_TH_ERRORS
15494}
15495
15496// to_dense
15497static PyObject * THPVariable_to_dense(PyObject* self_, PyObject* args, PyObject* kwargs)
15498{
15499 HANDLE_TH_ERRORS
15500 const Tensor& self = THPVariable_Unpack(self_);
15501 static PythonArgParser parser({
15502 "to_dense(ScalarType? dtype=None)",
15503 }, /*traceable=*/true);
15504
15505 ParsedArgs<1> parsed_args;
15506 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15507 if(_r.has_torch_function()) {
15508 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15509 }
15510 // aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
15511
15512 auto dispatch_to_dense = [](const at::Tensor & self, c10::optional<at::ScalarType> dtype) -> at::Tensor {
15513 pybind11::gil_scoped_release no_gil;
15514 return self.to_dense(dtype);
15515 };
15516 return wrap(dispatch_to_dense(self, _r.scalartypeOptional(0)));
15517 Py_RETURN_NONE;
15518 END_HANDLE_TH_ERRORS
15519}
15520
15521// to_mkldnn
15522static PyObject * THPVariable_to_mkldnn(PyObject* self_, PyObject* args, PyObject* kwargs)
15523{
15524 HANDLE_TH_ERRORS
15525 const Tensor& self = THPVariable_Unpack(self_);
15526 static PythonArgParser parser({
15527 "to_mkldnn(ScalarType? dtype=None)",
15528 }, /*traceable=*/true);
15529
15530 ParsedArgs<1> parsed_args;
15531 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15532 if(_r.has_torch_function()) {
15533 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15534 }
15535 // aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
15536
15537 auto dispatch_to_mkldnn = [](const at::Tensor & self, c10::optional<at::ScalarType> dtype) -> at::Tensor {
15538 pybind11::gil_scoped_release no_gil;
15539 return self.to_mkldnn(dtype);
15540 };
15541 return wrap(dispatch_to_mkldnn(self, _r.scalartypeOptional(0)));
15542 Py_RETURN_NONE;
15543 END_HANDLE_TH_ERRORS
15544}
15545
15546// to_padded_tensor
15547static PyObject * THPVariable_to_padded_tensor(PyObject* self_, PyObject* args, PyObject* kwargs)
15548{
15549 HANDLE_TH_ERRORS
15550 const Tensor& self = THPVariable_Unpack(self_);
15551 static PythonArgParser parser({
15552 "to_padded_tensor(double padding, SymIntArrayRef? output_size=None)",
15553 }, /*traceable=*/true);
15554
15555 ParsedArgs<2> parsed_args;
15556 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15557 if(_r.has_torch_function()) {
15558 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15559 }
15560 // aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
15561
15562 auto dispatch_to_padded_tensor = [](const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) -> at::Tensor {
15563 pybind11::gil_scoped_release no_gil;
15564 return self.to_padded_tensor_symint(padding, output_size);
15565 };
15566 return wrap(dispatch_to_padded_tensor(self, _r.toDouble(0), _r.symintlistOptional(1)));
15567 Py_RETURN_NONE;
15568 END_HANDLE_TH_ERRORS
15569}
15570
15572// to_sparse
15573static PyObject * THPVariable_to_sparse(PyObject* self_, PyObject* args, PyObject* kwargs)
15574{
15575 HANDLE_TH_ERRORS
15576 const Tensor& self = THPVariable_Unpack(self_);
15577 static PythonArgParser parser({
15578 "to_sparse(*, Layout? layout=None, IntArrayRef[2]? blocksize=None, int64_t? dense_dim=None)",
15579 "to_sparse(int64_t sparse_dim)",
15580 }, /*traceable=*/true);
15581
15582 ParsedArgs<3> parsed_args;
15583 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15584 if(_r.has_torch_function()) {
15585 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15586 }
15587 switch (_r.idx) {
15588 case 0: {
15589 // aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
15590
15591 auto dispatch_to_sparse = [](const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) -> at::Tensor {
15592 pybind11::gil_scoped_release no_gil;
15593 return self.to_sparse(layout, blocksize, dense_dim);
15594 };
15595 return wrap(dispatch_to_sparse(self, _r.layoutOptional(0), _r.intlistOptional(1), _r.toInt64Optional(2)));
15596 }
15597 case 1: {
15598 // aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
15599
15600 auto dispatch_to_sparse = [](const at::Tensor & self, int64_t sparse_dim) -> at::Tensor {
15601 pybind11::gil_scoped_release no_gil;
15602 return self.to_sparse(sparse_dim);
15603 };
15604 return wrap(dispatch_to_sparse(self, _r.toInt64(0)));
15605 }
15606 }
15607 Py_RETURN_NONE;
15608 END_HANDLE_TH_ERRORS
15609}
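// Illustrative (editorial) note: Python-level calls for the two to_sparse
// overloads above, e.g.:
//   t.to_sparse()                                            # COO, all defaults
//   t.to_sparse(2)                                           # sparse_dim overload
//   t.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 2))   # keyword-only overload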
15610
15611// to_sparse_bsc
15612static PyObject * THPVariable_to_sparse_bsc(PyObject* self_, PyObject* args, PyObject* kwargs)
15613{
15614 HANDLE_TH_ERRORS
15615 const Tensor& self = THPVariable_Unpack(self_);
15616 static PythonArgParser parser({
15617 "to_sparse_bsc(IntArrayRef[2] blocksize, int64_t? dense_dim=None)",
15618 }, /*traceable=*/true);
15619
15620 ParsedArgs<2> parsed_args;
15621 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15622 if(_r.has_torch_function()) {
15623 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15624 }
15625 // aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
15626
15627 auto dispatch_to_sparse_bsc = [](const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) -> at::Tensor {
15628 pybind11::gil_scoped_release no_gil;
15629 return self.to_sparse_bsc(blocksize, dense_dim);
15630 };
15631 return wrap(dispatch_to_sparse_bsc(self, _r.intlist(0), _r.toInt64Optional(1)));
15632 Py_RETURN_NONE;
15633 END_HANDLE_TH_ERRORS
15634}
15635
15636// to_sparse_bsr
15637static PyObject * THPVariable_to_sparse_bsr(PyObject* self_, PyObject* args, PyObject* kwargs)
15638{
15639 HANDLE_TH_ERRORS
15640 const Tensor& self = THPVariable_Unpack(self_);
15641 static PythonArgParser parser({
15642 "to_sparse_bsr(IntArrayRef[2] blocksize, int64_t? dense_dim=None)",
15643 }, /*traceable=*/true);
15644
15645 ParsedArgs<2> parsed_args;
15646 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15647 if(_r.has_torch_function()) {
15648 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15649 }
15650 // aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
15651
15652 auto dispatch_to_sparse_bsr = [](const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) -> at::Tensor {
15653 pybind11::gil_scoped_release no_gil;
15654 return self.to_sparse_bsr(blocksize, dense_dim);
15655 };
15656 return wrap(dispatch_to_sparse_bsr(self, _r.intlist(0), _r.toInt64Optional(1)));
15657 Py_RETURN_NONE;
15658 END_HANDLE_TH_ERRORS
15659}
15660
15661// to_sparse_csc
15662static PyObject * THPVariable_to_sparse_csc(PyObject* self_, PyObject* args, PyObject* kwargs)
15663{
15664 HANDLE_TH_ERRORS
15665 const Tensor& self = THPVariable_Unpack(self_);
15666 static PythonArgParser parser({
15667 "to_sparse_csc(int64_t? dense_dim=None)",
15668 }, /*traceable=*/true);
15669
15670 ParsedArgs<1> parsed_args;
15671 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15672 if(_r.has_torch_function()) {
15673 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15674 }
15675 // aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
15676
15677 auto dispatch_to_sparse_csc = [](const at::Tensor & self, c10::optional<int64_t> dense_dim) -> at::Tensor {
15678 pybind11::gil_scoped_release no_gil;
15679 return self.to_sparse_csc(dense_dim);
15680 };
15681 return wrap(dispatch_to_sparse_csc(self, _r.toInt64Optional(0)));
15682 Py_RETURN_NONE;
15683 END_HANDLE_TH_ERRORS
15684}
15685
15686// to_sparse_csr
15687static PyObject * THPVariable_to_sparse_csr(PyObject* self_, PyObject* args, PyObject* kwargs)
15688{
15689 HANDLE_TH_ERRORS
15690 const Tensor& self = THPVariable_Unpack(self_);
15691 static PythonArgParser parser({
15692 "to_sparse_csr(int64_t? dense_dim=None)",
15693 }, /*traceable=*/true);
15694
15695 ParsedArgs<1> parsed_args;
15696 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15697 if(_r.has_torch_function()) {
15698 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15699 }
15700 // aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
15701
15702 auto dispatch_to_sparse_csr = [](const at::Tensor & self, c10::optional<int64_t> dense_dim) -> at::Tensor {
15703 pybind11::gil_scoped_release no_gil;
15704 return self.to_sparse_csr(dense_dim);
15705 };
15706 return wrap(dispatch_to_sparse_csr(self, _r.toInt64Optional(0)));
15707 Py_RETURN_NONE;
15708 END_HANDLE_TH_ERRORS
15709}
15710
15711// topk
15712static PyObject * THPVariable_topk(PyObject* self_, PyObject* args, PyObject* kwargs)
15713{
15714 HANDLE_TH_ERRORS
15715 static PyTypeObject* NamedTuple = get_namedtuple("topk");
15716 const Tensor& self = THPVariable_Unpack(self_);
15717 static PythonArgParser parser({
15718 "topk(int64_t k, int64_t dim=-1, bool largest=True, bool sorted=True)",
15719 }, /*traceable=*/true);
15720
15721 ParsedArgs<4> parsed_args;
15722 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15723 if(_r.has_torch_function()) {
15724 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15725 }
15726 // aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
15727
15728 auto dispatch_topk = [](const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) -> ::std::tuple<at::Tensor,at::Tensor> {
15729 pybind11::gil_scoped_release no_gil;
15730 return self.topk(k, dim, largest, sorted);
15731 };
15732 return wrap(NamedTuple, dispatch_topk(self, _r.toInt64(0), _r.toInt64(1), _r.toBool(2), _r.toBool(3)));
15733 Py_RETURN_NONE;
15734 END_HANDLE_TH_ERRORS
15735}
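// Illustrative (editorial) note: topk wraps its result in the
// torch.return_types.topk named tuple, so both positional unpacking and field
// access work from Python:
//   values, indices = t.topk(3)
//   t.topk(3, dim=0, largest=False).indices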
15736
15737// trace
15738static PyObject * THPVariable_trace(PyObject* self_, PyObject* args)
15739{
15740 HANDLE_TH_ERRORS
15741 const Tensor& self = THPVariable_Unpack(self_);
15742 if(check_has_torch_function(self_)) {
15743 return handle_torch_function(self_, "trace");
15744 }
15745 // aten::trace(Tensor self) -> Tensor
15746
15747 auto dispatch_trace = [](const at::Tensor & self) -> at::Tensor {
15748 pybind11::gil_scoped_release no_gil;
15749 return self.trace();
15750 };
15751 return wrap(dispatch_trace(self));
15752 END_HANDLE_TH_ERRORS
15753}
15754
15756// transpose
15757static PyObject * THPVariable_transpose(PyObject* self_, PyObject* args, PyObject* kwargs)
15758{
15759 HANDLE_TH_ERRORS
15760 const Tensor& self = THPVariable_Unpack(self_);
15761 static PythonArgParser parser({
15762 "transpose(int64_t dim0, int64_t dim1)",
15763 "transpose(Dimname dim0, Dimname dim1)",
15764 }, /*traceable=*/true);
15765
15766 ParsedArgs<2> parsed_args;
15767 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15768 if(_r.has_torch_function()) {
15769 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15770 }
15771 switch (_r.idx) {
15772 case 0: {
15773 // aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
15774
15775 auto dispatch_transpose = [](const at::Tensor & self, int64_t dim0, int64_t dim1) -> at::Tensor {
15776 pybind11::gil_scoped_release no_gil;
15777 return self.transpose(dim0, dim1);
15778 };
15779 return wrap(dispatch_transpose(self, _r.toInt64(0), _r.toInt64(1)));
15780 }
15781 case 1: {
15782 // aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
15783
15784 auto dispatch_transpose = [](const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) -> at::Tensor {
15785 pybind11::gil_scoped_release no_gil;
15786 return self.transpose(dim0, dim1);
15787 };
15788 return wrap(dispatch_transpose(self, _r.dimname(0), _r.dimname(1)));
15789 }
15790 }
15791 Py_RETURN_NONE;
15792 END_HANDLE_TH_ERRORS
15793}
15794
15795// transpose_
15796static PyObject * THPVariable_transpose_(PyObject* self_, PyObject* args, PyObject* kwargs)
15797{
15798 HANDLE_TH_ERRORS
15799 const Tensor& self = THPVariable_Unpack(self_);
15800 static PythonArgParser parser({
15801 "transpose_(int64_t dim0, int64_t dim1)",
15802 }, /*traceable=*/true);
15803
15804 ParsedArgs<2> parsed_args;
15805 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15806 if(_r.has_torch_function()) {
15807 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15808 }
15809 // aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
15810
15811 auto dispatch_transpose_ = [](const at::Tensor & self, int64_t dim0, int64_t dim1) -> at::Tensor {
15812 pybind11::gil_scoped_release no_gil;
15813 return self.transpose_(dim0, dim1);
15814 };
15815 return wrap(dispatch_transpose_(self, _r.toInt64(0), _r.toInt64(1)));
15816 Py_RETURN_NONE;
15817 END_HANDLE_TH_ERRORS
15818}
15819
15820// triangular_solve
15821static PyObject * THPVariable_triangular_solve(PyObject* self_, PyObject* args, PyObject* kwargs)
15822{
15823 HANDLE_TH_ERRORS
15824 static PyTypeObject* NamedTuple = get_namedtuple("triangular_solve");
15825 const Tensor& self = THPVariable_Unpack(self_);
15826 static PythonArgParser parser({
15827 "triangular_solve(Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False)",
15828 }, /*traceable=*/true);
15829
15830 ParsedArgs<4> parsed_args;
15831 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15832 if(_r.has_torch_function()) {
15833 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15834 }
15835 // aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
15836
15837 auto dispatch_triangular_solve = [](const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) -> ::std::tuple<at::Tensor,at::Tensor> {
15838 pybind11::gil_scoped_release no_gil;
15839 return self.triangular_solve(A, upper, transpose, unitriangular);
15840 };
15841 return wrap(NamedTuple, dispatch_triangular_solve(self, _r.tensor(0), _r.toBool(1), _r.toBool(2), _r.toBool(3)));
15842 Py_RETURN_NONE;
15843 END_HANDLE_TH_ERRORS
15844}
15845
15846// tril
15847static PyObject * THPVariable_tril(PyObject* self_, PyObject* args, PyObject* kwargs)
15848{
15849 HANDLE_TH_ERRORS
15850 const Tensor& self = THPVariable_Unpack(self_);
15851 static PythonArgParser parser({
15852 "tril(int64_t diagonal=0)",
15853 }, /*traceable=*/true);
15854
15855 ParsedArgs<1> parsed_args;
15856 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15857 if(_r.has_torch_function()) {
15858 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15859 }
15860 // aten::tril(Tensor self, int diagonal=0) -> Tensor
15861
15862 auto dispatch_tril = [](const at::Tensor & self, int64_t diagonal) -> at::Tensor {
15863 pybind11::gil_scoped_release no_gil;
15864 return self.tril(diagonal);
15865 };
15866 return wrap(dispatch_tril(self, _r.toInt64(0)));
15867 Py_RETURN_NONE;
15868 END_HANDLE_TH_ERRORS
15869}
15870
15871// tril_
15872static PyObject * THPVariable_tril_(PyObject* self_, PyObject* args, PyObject* kwargs)
15873{
15874 HANDLE_TH_ERRORS
15875 const Tensor& self = THPVariable_Unpack(self_);
15876 static PythonArgParser parser({
15877 "tril_(int64_t diagonal=0)",
15878 }, /*traceable=*/true);
15879
15880 ParsedArgs<1> parsed_args;
15881 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15882 if(_r.has_torch_function()) {
15883 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15884 }
15885 // aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
15886
15887 auto dispatch_tril_ = [](const at::Tensor & self, int64_t diagonal) -> at::Tensor {
15888 pybind11::gil_scoped_release no_gil;
15889 return self.tril_(diagonal);
15890 };
15891 return wrap(dispatch_tril_(self, _r.toInt64(0)));
15892 Py_RETURN_NONE;
15893 END_HANDLE_TH_ERRORS
15894}
15895
15896// triu
15897static PyObject * THPVariable_triu(PyObject* self_, PyObject* args, PyObject* kwargs)
15898{
15899 HANDLE_TH_ERRORS
15900 const Tensor& self = THPVariable_Unpack(self_);
15901 static PythonArgParser parser({
15902 "triu(int64_t diagonal=0)",
15903 }, /*traceable=*/true);
15904
15905 ParsedArgs<1> parsed_args;
15906 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15907 if(_r.has_torch_function()) {
15908 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15909 }
15910 // aten::triu(Tensor self, int diagonal=0) -> Tensor
15911
15912 auto dispatch_triu = [](const at::Tensor & self, int64_t diagonal) -> at::Tensor {
15913 pybind11::gil_scoped_release no_gil;
15914 return self.triu(diagonal);
15915 };
15916 return wrap(dispatch_triu(self, _r.toInt64(0)));
15917 Py_RETURN_NONE;
15918 END_HANDLE_TH_ERRORS
15919}
15920
15921// triu_
15922static PyObject * THPVariable_triu_(PyObject* self_, PyObject* args, PyObject* kwargs)
15923{
15924 HANDLE_TH_ERRORS
15925 const Tensor& self = THPVariable_Unpack(self_);
15926 static PythonArgParser parser({
15927 "triu_(int64_t diagonal=0)",
15928 }, /*traceable=*/true);
15929
15930 ParsedArgs<1> parsed_args;
15931 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15932 if(_r.has_torch_function()) {
15933 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15934 }
15935 // aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
15936
15937 auto dispatch_triu_ = [](const at::Tensor & self, int64_t diagonal) -> at::Tensor {
15938 pybind11::gil_scoped_release no_gil;
15939 return self.triu_(diagonal);
15940 };
15941 return wrap(dispatch_triu_(self, _r.toInt64(0)));
15942 Py_RETURN_NONE;
15943 END_HANDLE_TH_ERRORS
15944}
15945
15947// true_divide
15948static PyObject * THPVariable_true_divide(PyObject* self_, PyObject* args, PyObject* kwargs)
15949{
15950 HANDLE_TH_ERRORS
15951 const Tensor& self = THPVariable_Unpack(self_);
15952 static PythonArgParser parser({
15953 "true_divide(Tensor other)",
15954 "true_divide(Scalar other)",
15955 }, /*traceable=*/true);
15956
15957 ParsedArgs<1> parsed_args;
15958 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15959 if(_r.has_torch_function()) {
15960 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
15961 }
15962 switch (_r.idx) {
15963 case 0: {
15964 // aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
15965
15966 auto dispatch_true_divide = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
15967 pybind11::gil_scoped_release no_gil;
15968 return self.true_divide(other);
15969 };
15970 return wrap(dispatch_true_divide(self, _r.tensor(0)));
15971 }
15972 case 1: {
15973 // aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
15974
15975 auto dispatch_true_divide = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
15976 pybind11::gil_scoped_release no_gil;
15977 return self.true_divide(other);
15978 };
15979 return wrap(dispatch_true_divide(self, _r.scalar(0)));
15980 }
15981 }
15982 Py_RETURN_NONE;
15983 END_HANDLE_TH_ERRORS
15984}
15985
15987// true_divide_
15988static PyObject * THPVariable_true_divide_(PyObject* self_, PyObject* args, PyObject* kwargs)
15989{
15990 HANDLE_TH_ERRORS
15991 const Tensor& self = THPVariable_Unpack(self_);
15992 static PythonArgParser parser({
15993 "true_divide_(Tensor other)",
15994 "true_divide_(Scalar other)",
15995 }, /*traceable=*/true);
15996
15997 ParsedArgs<1> parsed_args;
15998 auto _r = parser.parse(self_, args, kwargs, parsed_args);
15999 if(_r.has_torch_function()) {
16000 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16001 }
16002 switch (_r.idx) {
16003 case 0: {
16004 // aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
16005
16006 auto dispatch_true_divide_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
16007 pybind11::gil_scoped_release no_gil;
16008 return self.true_divide_(other);
16009 };
16010 return wrap(dispatch_true_divide_(self, _r.tensor(0)));
16011 }
16012 case 1: {
16013 // aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
16014
16015 auto dispatch_true_divide_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
16016 pybind11::gil_scoped_release no_gil;
16017 return self.true_divide_(other);
16018 };
16019 return wrap(dispatch_true_divide_(self, _r.scalar(0)));
16020 }
16021 }
16022 Py_RETURN_NONE;
16023 END_HANDLE_TH_ERRORS
16024}
16025
16026// trunc
16027static PyObject * THPVariable_trunc(PyObject* self_, PyObject* args)
16028{
16029 HANDLE_TH_ERRORS
16030 const Tensor& self = THPVariable_Unpack(self_);
16031 if(check_has_torch_function(self_)) {
16032 return handle_torch_function(self_, "trunc");
16033 }
16034 // aten::trunc(Tensor self) -> Tensor
16035
16036 auto dispatch_trunc = [](const at::Tensor & self) -> at::Tensor {
16037 pybind11::gil_scoped_release no_gil;
16038 return self.trunc();
16039 };
16040 return wrap(dispatch_trunc(self));
16041 END_HANDLE_TH_ERRORS
16042}
16043
16044// trunc_
16045static PyObject * THPVariable_trunc_(PyObject* self_, PyObject* args)
16046{
16047 HANDLE_TH_ERRORS
16048 const Tensor& self = THPVariable_Unpack(self_);
16049 if(check_has_torch_function(self_)) {
16050 return handle_torch_function(self_, "trunc_");
16051 }
16052 // aten::trunc_(Tensor(a!) self) -> Tensor(a!)
16053
16054 auto dispatch_trunc_ = [](const at::Tensor & self) -> at::Tensor {
16055 pybind11::gil_scoped_release no_gil;
16056 return self.trunc_();
16057 };
16058 return wrap(dispatch_trunc_(self));
16059 END_HANDLE_TH_ERRORS
16060}
16061
16062// type_as
16063static PyObject * THPVariable_type_as(PyObject* self_, PyObject* args, PyObject* kwargs)
16064{
16065 HANDLE_TH_ERRORS
16066 const Tensor& self = THPVariable_Unpack(self_);
16067 static PythonArgParser parser({
16068 "type_as(Tensor other)",
16069 }, /*traceable=*/true);
16070
16071 ParsedArgs<1> parsed_args;
16072 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16073 if(_r.has_torch_function()) {
16074 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16075 }
16076 // aten::type_as(Tensor self, Tensor other) -> Tensor
16077
16078 auto dispatch_type_as = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
16079 pybind11::gil_scoped_release no_gil;
16080 return self.type_as(other);
16081 };
16082 return wrap(dispatch_type_as(self, _r.tensor(0)));
16083 Py_RETURN_NONE;
16084 END_HANDLE_TH_ERRORS
16085}
16086
16088// unbind
16089static PyObject * THPVariable_unbind(PyObject* self_, PyObject* args, PyObject* kwargs)
16090{
16091 HANDLE_TH_ERRORS
16092 const Tensor& self = THPVariable_Unpack(self_);
16093 static PythonArgParser parser({
16094 "unbind(int64_t dim=0)",
16095 "unbind(Dimname dim)",
16096 }, /*traceable=*/true);
16097
16098 ParsedArgs<1> parsed_args;
16099 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16100 if(_r.has_torch_function()) {
16101 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16102 }
16103 switch (_r.idx) {
16104 case 0: {
16105 // aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
16106
16107 auto dispatch_unbind = [](const at::Tensor & self, int64_t dim) -> ::std::vector<at::Tensor> {
16108 pybind11::gil_scoped_release no_gil;
16109 return self.unbind(dim);
16110 };
16111 return wrap(dispatch_unbind(self, _r.toInt64(0)));
16112 }
16113 case 1: {
16114 // aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
16115
16116 auto dispatch_unbind = [](const at::Tensor & self, at::Dimname dim) -> ::std::vector<at::Tensor> {
16117 pybind11::gil_scoped_release no_gil;
16118 return self.unbind(dim);
16119 };
16120 return wrap(dispatch_unbind(self, _r.dimname(0)));
16121 }
16122 }
16123 Py_RETURN_NONE;
16124 END_HANDLE_TH_ERRORS
16125}
16126
16128// unflatten
16129static PyObject * THPVariable_unflatten(PyObject* self_, PyObject* args, PyObject* kwargs)
16130{
16131 HANDLE_TH_ERRORS
16132 const Tensor& self = THPVariable_Unpack(self_);
16133 static PythonArgParser parser({
16134 "unflatten(Dimname dim, IntArrayRef sizes, DimnameList names)",
16135 "unflatten(int64_t dim, IntArrayRef sizes)",
16136 }, /*traceable=*/true);
16137
16138 ParsedArgs<3> parsed_args;
16139 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16140 if(_r.has_torch_function()) {
16141 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16142 }
16143 switch (_r.idx) {
16144 case 0: {
16145 // aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
16146
16147 auto dispatch_unflatten = [](const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) -> at::Tensor {
16148 pybind11::gil_scoped_release no_gil;
16149 return self.unflatten(dim, sizes, names);
16150 };
16151 return wrap(dispatch_unflatten(self, _r.dimname(0), _r.intlist(1), _r.dimnamelist(2)));
16152 }
16153 case 1: {
16154 // aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
16155
16156 auto dispatch_unflatten = [](const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) -> at::Tensor {
16157 pybind11::gil_scoped_release no_gil;
16158 return self.unflatten(dim, sizes);
16159 };
16160 return wrap(dispatch_unflatten(self, _r.toInt64(0), _r.intlist(1)));
16161 }
16162 }
16163 Py_RETURN_NONE;
16164 END_HANDLE_TH_ERRORS
16165}
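// Illustrative (editorial) note: the int overload of unflatten expands one
// dimension into several, e.g. for t of shape (2, 12):
//   t.unflatten(1, (3, 4))   # -> shape (2, 3, 4)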
16166
16167// unfold
16168static PyObject * THPVariable_unfold(PyObject* self_, PyObject* args, PyObject* kwargs)
16169{
16170 HANDLE_TH_ERRORS
16171 const Tensor& self = THPVariable_Unpack(self_);
16172 static PythonArgParser parser({
16173 "unfold(int64_t dimension, int64_t size, int64_t step)",
16174 }, /*traceable=*/true);
16175
16176 ParsedArgs<3> parsed_args;
16177 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16178 if(_r.has_torch_function()) {
16179 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16180 }
16181 // aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
16182
16183 auto dispatch_unfold = [](const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) -> at::Tensor {
16184 pybind11::gil_scoped_release no_gil;
16185 return self.unfold(dimension, size, step);
16186 };
16187 return wrap(dispatch_unfold(self, _r.toInt64(0), _r.toInt64(1), _r.toInt64(2)));
16188 Py_RETURN_NONE;
16189 END_HANDLE_TH_ERRORS
16190}
16191
16192// uniform_
16193static PyObject * THPVariable_uniform_(PyObject* self_, PyObject* args, PyObject* kwargs)
16194{
16195 HANDLE_TH_ERRORS
16196 const Tensor& self = THPVariable_Unpack(self_);
16197 static PythonArgParser parser({
16198 "uniform_(double from=0, double to=1, *, Generator? generator=None)",
16199 }, /*traceable=*/true);
16200
16201 ParsedArgs<3> parsed_args;
16202 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16203 if(_r.has_torch_function()) {
16204 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16205 }
16206 // aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
16207
16208 auto dispatch_uniform_ = [](const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) -> at::Tensor {
16209 pybind11::gil_scoped_release no_gil;
16210 return self.uniform_(from, to, generator);
16211 };
16212 return wrap(dispatch_uniform_(self, _r.toDouble(0), _r.toDouble(1), _r.generator(2)));
16213 Py_RETURN_NONE;
16214 END_HANDLE_TH_ERRORS
16215}
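// Illustrative (editorial) note: uniform_ fills the tensor in place with
// samples drawn from U(from, to), e.g. t.uniform_(-1.0, 1.0).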
16216
16217// unsafe_chunk
16218static PyObject * THPVariable_unsafe_chunk(PyObject* self_, PyObject* args, PyObject* kwargs)
16219{
16220 HANDLE_TH_ERRORS
16221 const Tensor& self = THPVariable_Unpack(self_);
16222 static PythonArgParser parser({
16223 "unsafe_chunk(int64_t chunks, int64_t dim=0)",
16224 }, /*traceable=*/true);
16225
16226 ParsedArgs<2> parsed_args;
16227 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16228 if(_r.has_torch_function()) {
16229 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16230 }
16231 // aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
16232
16233 auto dispatch_unsafe_chunk = [](const at::Tensor & self, int64_t chunks, int64_t dim) -> ::std::vector<at::Tensor> {
16234 pybind11::gil_scoped_release no_gil;
16235 return self.unsafe_chunk(chunks, dim);
16236 };
16237 return wrap(dispatch_unsafe_chunk(self, _r.toInt64(0), _r.toInt64(1)));
16238 Py_RETURN_NONE;
16239 END_HANDLE_TH_ERRORS
16240}
16241
16242// unsafe_split
16243static PyObject * THPVariable_unsafe_split(PyObject* self_, PyObject* args, PyObject* kwargs)
16244{
16245 HANDLE_TH_ERRORS
16246 const Tensor& self = THPVariable_Unpack(self_);
16247 static PythonArgParser parser({
16248 "unsafe_split(SymInt split_size, int64_t dim=0)",
16249 }, /*traceable=*/true);
16250
16251 ParsedArgs<2> parsed_args;
16252 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16253 if(_r.has_torch_function()) {
16254 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16255 }
16256 // aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
16257
16258 auto dispatch_unsafe_split = [](const at::Tensor & self, c10::SymInt split_size, int64_t dim) -> ::std::vector<at::Tensor> {
16259 pybind11::gil_scoped_release no_gil;
16260 return self.unsafe_split_symint(split_size, dim);
16261 };
16262 return wrap(dispatch_unsafe_split(self, _r.toSymInt(0), _r.toInt64(1)));
16263 Py_RETURN_NONE;
16264 END_HANDLE_TH_ERRORS
16265}
16266
16267// unsafe_split_with_sizes
16268static PyObject * THPVariable_unsafe_split_with_sizes(PyObject* self_, PyObject* args, PyObject* kwargs)
16269{
16270 HANDLE_TH_ERRORS
16271 const Tensor& self = THPVariable_Unpack(self_);
16272 static PythonArgParser parser({
16273 "unsafe_split_with_sizes(SymIntArrayRef split_sizes, int64_t dim=0)",
16274 }, /*traceable=*/true);
16275
16276 ParsedArgs<2> parsed_args;
16277 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16278 if(_r.has_torch_function()) {
16279 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16280 }
16281 // aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
16282
16283 auto dispatch_unsafe_split_with_sizes = [](const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) -> ::std::vector<at::Tensor> {
16284 pybind11::gil_scoped_release no_gil;
16285 return self.unsafe_split_with_sizes_symint(split_sizes, dim);
16286 };
16287 return wrap(dispatch_unsafe_split_with_sizes(self, _r.symintlist(0), _r.toInt64(1)));
16288 Py_RETURN_NONE;
16289 END_HANDLE_TH_ERRORS
16290}
16291
16292// unsqueeze
16293static PyObject * THPVariable_unsqueeze(PyObject* self_, PyObject* args, PyObject* kwargs)
16294{
16295 HANDLE_TH_ERRORS
16296 const Tensor& self = THPVariable_Unpack(self_);
16297 static PythonArgParser parser({
16298 "unsqueeze(int64_t dim)",
16299 }, /*traceable=*/true);
16300
16301 ParsedArgs<1> parsed_args;
16302 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16303 if(_r.has_torch_function()) {
16304 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16305 }
16306 // aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
16307
16308 auto dispatch_unsqueeze = [](const at::Tensor & self, int64_t dim) -> at::Tensor {
16309 pybind11::gil_scoped_release no_gil;
16310 return self.unsqueeze(dim);
16311 };
16312 return wrap(dispatch_unsqueeze(self, _r.toInt64(0)));
16313 Py_RETURN_NONE;
16314 END_HANDLE_TH_ERRORS
16315}
16316
16317// unsqueeze_
16318static PyObject * THPVariable_unsqueeze_(PyObject* self_, PyObject* args, PyObject* kwargs)
16319{
16320 HANDLE_TH_ERRORS
16321 const Tensor& self = THPVariable_Unpack(self_);
16322 static PythonArgParser parser({
16323 "unsqueeze_(int64_t dim)",
16324 }, /*traceable=*/true);
16325
16326 ParsedArgs<1> parsed_args;
16327 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16328 if(_r.has_torch_function()) {
16329 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16330 }
16331 // aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
16332
16333 auto dispatch_unsqueeze_ = [](const at::Tensor & self, int64_t dim) -> at::Tensor {
16334 pybind11::gil_scoped_release no_gil;
16335 return self.unsqueeze_(dim);
16336 };
16337 return wrap(dispatch_unsqueeze_(self, _r.toInt64(0)));
16338 Py_RETURN_NONE;
16339 END_HANDLE_TH_ERRORS
16340}
16341
16342// values
16343static PyObject * THPVariable_values(PyObject* self_, PyObject* args)
16344{
16345 HANDLE_TH_ERRORS
16346 const Tensor& self = THPVariable_Unpack(self_);
16347 if(check_has_torch_function(self_)) {
16348 return handle_torch_function(self_, "values");
16349 }
16350 // aten::values(Tensor(a) self) -> Tensor(a)
16351
16352 auto dispatch_values = [](const at::Tensor & self) -> at::Tensor {
16353 pybind11::gil_scoped_release no_gil;
16354 return self.values();
16355 };
16356 return wrap(dispatch_values(self));
16357 END_HANDLE_TH_ERRORS
16358}
16359
16361// var
16362static PyObject * THPVariable_var(PyObject* self_, PyObject* args, PyObject* kwargs)
16363{
16364 HANDLE_TH_ERRORS
16365 const Tensor& self = THPVariable_Unpack(self_);
16366 static PythonArgParser parser({
16367 "var(IntArrayRef[1]? dim, bool unbiased=True, bool keepdim=False)",
16368 "var(IntArrayRef[1]? dim=None, *, int64_t? correction=None, bool keepdim=False)",
16369 "var(bool unbiased=True)",
16370 "var(DimnameList[1] dim, bool unbiased=True, bool keepdim=False)",
16371 "var(DimnameList[1] dim, *, int64_t? correction=None, bool keepdim=False)",
16372 }, /*traceable=*/true);
16373
16374 ParsedArgs<3> parsed_args;
16375 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16376 if(_r.has_torch_function()) {
16377 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16378 }
16379 switch (_r.idx) {
16380 case 0: {
16381 // aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
16382
16383 auto dispatch_var = [](const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) -> at::Tensor {
16384 pybind11::gil_scoped_release no_gil;
16385 return self.var(dim, unbiased, keepdim);
16386 };
16387 return wrap(dispatch_var(self, _r.intlistOptional(0), _r.toBool(1), _r.toBool(2)));
16388 }
16389 case 1: {
16390 // aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
16391
16392 auto dispatch_var = [](const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) -> at::Tensor {
16393 pybind11::gil_scoped_release no_gil;
16394 return self.var(dim, correction, keepdim);
16395 };
16396 return wrap(dispatch_var(self, _r.intlistOptional(0), _r.toInt64Optional(1), _r.toBool(2)));
16397 }
16398 case 2: {
16399 // aten::var(Tensor self, bool unbiased=True) -> Tensor
16400
16401 auto dispatch_var = [](const at::Tensor & self, bool unbiased) -> at::Tensor {
16402 pybind11::gil_scoped_release no_gil;
16403 return self.var(unbiased);
16404 };
16405 return wrap(dispatch_var(self, _r.toBool(0)));
16406 }
16407 case 3: {
16408 // aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
16409
16410 auto dispatch_var = [](const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) -> at::Tensor {
16411 pybind11::gil_scoped_release no_gil;
16412 return self.var(dim, unbiased, keepdim);
16413 };
16414 return wrap(dispatch_var(self, _r.dimnamelist(0), _r.toBool(1), _r.toBool(2)));
16415 }
16416 case 4: {
16417 // aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
16418
16419 auto dispatch_var = [](const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) -> at::Tensor {
16420 pybind11::gil_scoped_release no_gil;
16421 return self.var(dim, correction, keepdim);
16422 };
16423 return wrap(dispatch_var(self, _r.dimnamelist(0), _r.toInt64Optional(1), _r.toBool(2)));
16424 }
16425 }
16426 Py_RETURN_NONE;
16427 END_HANDLE_TH_ERRORS
16428}
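// Illustrative (editorial) note on the var overloads above: unbiased=True is
// equivalent to correction=1 (Bessel's correction) and unbiased=False to
// correction=0, so from Python:
//   t.var(dim=0, unbiased=False) == t.var(dim=0, correction=0)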
16429
16430// vdot
16431static PyObject * THPVariable_vdot(PyObject* self_, PyObject* args, PyObject* kwargs)
16432{
16433 HANDLE_TH_ERRORS
16434 const Tensor& self = THPVariable_Unpack(self_);
16435 static PythonArgParser parser({
16436 "vdot(Tensor other)",
16437 }, /*traceable=*/true);
16438
16439 ParsedArgs<1> parsed_args;
16440 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16441 if(_r.has_torch_function()) {
16442 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16443 }
16444 // aten::vdot(Tensor self, Tensor other) -> Tensor
16445
16446 auto dispatch_vdot = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
16447 pybind11::gil_scoped_release no_gil;
16448 return self.vdot(other);
16449 };
16450 return wrap(dispatch_vdot(self, _r.tensor(0)));
16451 Py_RETURN_NONE;
16452 END_HANDLE_TH_ERRORS
16453}
16454
16456// view
16457static PyObject * THPVariable_view(PyObject* self_, PyObject* args, PyObject* kwargs)
16458{
16459 HANDLE_TH_ERRORS
16460 const Tensor& self = THPVariable_Unpack(self_);
16461 static PythonArgParser parser({
16462 "view(ScalarType dtype)",
16463 "view(SymIntArrayRef size)",
16464 }, /*traceable=*/true);
16465
16466 ParsedArgs<1> parsed_args;
16467 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16468 if(_r.has_torch_function()) {
16469 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16470 }
16471 switch (_r.idx) {
16472 case 0: {
16473 // aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
16474
16475 auto dispatch_view = [](const at::Tensor & self, at::ScalarType dtype) -> at::Tensor {
16476 pybind11::gil_scoped_release no_gil;
16477 return self.view(dtype);
16478 };
16479 return wrap(dispatch_view(self, _r.scalartype(0)));
16480 }
16481 case 1: {
16482 // aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
16483
16484 auto dispatch_view = [](const at::Tensor & self, c10::SymIntArrayRef size) -> at::Tensor {
16485 pybind11::gil_scoped_release no_gil;
16486 return self.view_symint(size);
16487 };
16488 return wrap(dispatch_view(self, _r.symintlist(0)));
16489 }
16490 }
16491 Py_RETURN_NONE;
16492 END_HANDLE_TH_ERRORS
16493}
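// Illustrative (editorial) note: Python-level calls for the two view
// overloads above:
//   t.view(4, -1)          # SymInt[] overload: same data, new shape
//   t.view(torch.int32)    # dtype overload: reinterprets the underlying bytes
// (the dtype overload requires the old and new element sizes to be compatible
// with the tensor's last dimension)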
16494
16495// view_as
16496static PyObject * THPVariable_view_as(PyObject* self_, PyObject* args, PyObject* kwargs)
16497{
16498 HANDLE_TH_ERRORS
16499 const Tensor& self = THPVariable_Unpack(self_);
16500 static PythonArgParser parser({
16501 "view_as(Tensor other)",
16502 }, /*traceable=*/true);
16503
16504 ParsedArgs<1> parsed_args;
16505 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16506 if(_r.has_torch_function()) {
16507 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16508 }
16509 // aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
16510
16511 auto dispatch_view_as = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
16512 pybind11::gil_scoped_release no_gil;
16513 return self.view_as(other);
16514 };
16515 return wrap(dispatch_view_as(self, _r.tensor(0)));
16516 Py_RETURN_NONE;
16517 END_HANDLE_TH_ERRORS
16518}
16519
16521// vsplit
16522static PyObject * THPVariable_vsplit(PyObject* self_, PyObject* args, PyObject* kwargs)
16523{
16524 HANDLE_TH_ERRORS
16525 const Tensor& self = THPVariable_Unpack(self_);
16526 static PythonArgParser parser({
16527 "vsplit(int64_t sections)",
16528 "vsplit(IntArrayRef indices)",
16529 }, /*traceable=*/true);
16530
16531 ParsedArgs<1> parsed_args;
16532 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16533 if(_r.has_torch_function()) {
16534 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16535 }
16536 switch (_r.idx) {
16537 case 0: {
16538 // aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
16539
16540 auto dispatch_vsplit = [](const at::Tensor & self, int64_t sections) -> ::std::vector<at::Tensor> {
16541 pybind11::gil_scoped_release no_gil;
16542 return self.vsplit(sections);
16543 };
16544 return wrap(dispatch_vsplit(self, _r.toInt64(0)));
16545 }
16546 case 1: {
16547 // aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
16548
16549 auto dispatch_vsplit = [](const at::Tensor & self, at::IntArrayRef indices) -> ::std::vector<at::Tensor> {
16550 pybind11::gil_scoped_release no_gil;
16551 return self.vsplit(indices);
16552 };
16553 return wrap(dispatch_vsplit(self, _r.intlist(0)));
16554 }
16555 }
16556 Py_RETURN_NONE;
16557 END_HANDLE_TH_ERRORS
16558}
16559
16561// where
16562static PyObject * THPVariable_where(PyObject* self_, PyObject* args, PyObject* kwargs)
16563{
16564 HANDLE_TH_ERRORS
16565 const Tensor& self = THPVariable_Unpack(self_);
16566 static PythonArgParser parser({
16567 "where(Tensor condition, Tensor other)",
16568 "where(Tensor condition, Scalar other)",
16569 }, /*traceable=*/true);
16570
16571 ParsedArgs<2> parsed_args;
16572 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16573 if(_r.has_torch_function()) {
16574 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16575 }
16576 switch (_r.idx) {
16577 case 0: {
16578 // aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
16579
16580 auto dispatch_where = [](const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
16581 pybind11::gil_scoped_release no_gil;
16582 return self.where(condition, other);
16583 };
16584 return wrap(dispatch_where(_r.tensor(0), self, _r.tensor(1)));
16585 }
16586 case 1: {
16587 // aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
16588
16589 auto dispatch_where = [](const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
16590 pybind11::gil_scoped_release no_gil;
16591 return self.where(condition, other);
16592 };
16593 return wrap(dispatch_where(_r.tensor(0), self, _r.scalar(1)));
16594 }
16595 }
16596 Py_RETURN_NONE;
16597 END_HANDLE_TH_ERRORS
16598}
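// Note (editorial): the method form passes self as the second argument of the
// aten schema, so t.where(cond, other) is equivalent to
// torch.where(cond, t, other).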
16599
16601// xlogy
16602static PyObject * THPVariable_xlogy(PyObject* self_, PyObject* args, PyObject* kwargs)
16603{
16604 HANDLE_TH_ERRORS
16605 const Tensor& self = THPVariable_Unpack(self_);
16606 static PythonArgParser parser({
16607 "xlogy(Tensor other)",
16608 "xlogy(Scalar other)",
16609 }, /*traceable=*/true);
16610
16611 ParsedArgs<1> parsed_args;
16612 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16613 if(_r.has_torch_function()) {
16614 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16615 }
16616 switch (_r.idx) {
16617 case 0: {
16618 // aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
16619
16620 auto dispatch_xlogy = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
16621 pybind11::gil_scoped_release no_gil;
16622 return self.xlogy(other);
16623 };
16624 return wrap(dispatch_xlogy(self, _r.tensor(0)));
16625 }
16626 case 1: {
16627 // aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
16628
16629 auto dispatch_xlogy = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
16630 pybind11::gil_scoped_release no_gil;
16631 return self.xlogy(other);
16632 };
16633 return wrap(dispatch_xlogy(self, _r.scalar(0)));
16634 }
16635 }
16636 Py_RETURN_NONE;
16637 END_HANDLE_TH_ERRORS
16638}
16639
16641// xlogy_
16642static PyObject * THPVariable_xlogy_(PyObject* self_, PyObject* args, PyObject* kwargs)
16643{
16644 HANDLE_TH_ERRORS
16645 const Tensor& self = THPVariable_Unpack(self_);
16646 static PythonArgParser parser({
16647 "xlogy_(Tensor other)",
16648 "xlogy_(Scalar other)",
16649 }, /*traceable=*/true);
16650
16651 ParsedArgs<1> parsed_args;
16652 auto _r = parser.parse(self_, args, kwargs, parsed_args);
16653 if(_r.has_torch_function()) {
16654 return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
16655 }
16656 switch (_r.idx) {
16657 case 0: {
16658 // aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
16659
16660 auto dispatch_xlogy_ = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
16661 pybind11::gil_scoped_release no_gil;
16662 return self.xlogy_(other);
16663 };
16664 return wrap(dispatch_xlogy_(self, _r.tensor(0)));
16665 }
16666 case 1: {
16667 // aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
16668
16669 auto dispatch_xlogy_ = [](const at::Tensor & self, const at::Scalar & other) -> at::Tensor {
16670 pybind11::gil_scoped_release no_gil;
16671 return self.xlogy_(other);
16672 };
16673 return wrap(dispatch_xlogy_(self, _r.scalar(0)));
16674 }
16675 }
16676 Py_RETURN_NONE;
16677 END_HANDLE_TH_ERRORS
16678}
16679
16680// zero_
16681static PyObject * THPVariable_zero_(PyObject* self_, PyObject* args)
16682{
16683 HANDLE_TH_ERRORS
16684 const Tensor& self = THPVariable_Unpack(self_);
16685 if(check_has_torch_function(self_)) {
16686 return handle_torch_function(self_, "zero_");
16687 }
16688 // aten::zero_(Tensor(a!) self) -> Tensor(a!)
16689
16690 auto dispatch_zero_ = [](const at::Tensor & self) -> at::Tensor {
16691 pybind11::gil_scoped_release no_gil;
16692 return self.zero_();
16693 };
16694 return wrap(dispatch_zero_(self));
16695 END_HANDLE_TH_ERRORS
16696}
16697
16698static PyObject * THPVariable_bool_scalar(PyObject* self, PyObject* args) {
16699 if (check_has_torch_function(self)) {
16700 HANDLE_TH_ERRORS
16701 return handle_torch_function(self, "__bool__", args);
16702 END_HANDLE_TH_ERRORS
16703 }
16704 jit::tracer::warn("Converting a tensor to a Python boolean", jit::tracer::WARN_PYTHON_DATAFLOW);
16705 return THPVariable_is_nonzero(self, args);
16706}
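// Note (editorial): __bool__ is only defined for single-element tensors;
// THPVariable_is_nonzero raises for tensors with zero or more than one
// element. E.g. bool(torch.tensor(0)) is False, while
// bool(torch.tensor([1, 2])) raises a RuntimeError.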
16707
// Wrapper that converts a raised TypeError into a NotImplemented return value.
// Used to implement the binary arithmetic operators.
16710template <PyObject* (*Func)(PyObject*, PyObject*, PyObject*)>
static PyObject * TypeError_to_NotImplemented_(PyObject* self, PyObject* args, PyObject* kwargs) {
  PyObject* ret = Func(self, args, kwargs);
16714 if (!ret && PyErr_ExceptionMatches(PyExc_TypeError)) {
16715 PyErr_Clear();
16716 Py_INCREF(Py_NotImplemented);
16717 ret = Py_NotImplemented;
16718 }
16719 return ret;
16720}
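// Note (editorial): returning Py_NotImplemented instead of raising lets
// CPython fall back to the other operand's reflected method: if t.__add__(x)
// cannot handle x, CPython will try x.__radd__(t) before raising a TypeError
// itself.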
16721
// set_ has to be defined in the template because the c10::Storage object
// does not carry a dtype, and we need to make sure the Python storage
// object's type matches the tensor's type.
16725static PyObject* THPVariable_set_(
16726 PyObject* self_,
16727 PyObject* args,
16728 PyObject* kwargs) {
16729 HANDLE_TH_ERRORS
16730 const Tensor& self = THPVariable_Unpack(self_);
16731 static PythonArgParser parser(
16732 {
16733 "set_()",
16734 "set_(Storage source)",
16735 "set_(Storage source, SymInt storage_offset, SymIntArrayRef size, SymIntArrayRef stride=None)",
16736 "set_(Tensor source)",
16737 "set_(Tensor source, SymInt storage_offset, SymIntArrayRef size, SymIntArrayRef stride=None)",
16738 },
16739 /*traceable=*/false);
16740
16741 ParsedArgs<4> parsed_args;
16742 auto _r = parser.parse(args, kwargs, parsed_args);
16743
16744 switch (_r.idx) {
16745 case 0: {
16746 // aten::set_(Tensor(a!) self) -> Tensor(a!)
16747 auto dispatch_set_ = [](const Tensor& self) -> Tensor {
16748 pybind11::gil_scoped_release no_gil;
16749 return self.set_();
16750 };
16751 return wrap(dispatch_set_(self));
16752 }
16753 case 1: {
16754 // aten::set_.source_Storage(Tensor(a!) self, Storage source) ->
16755 // Tensor(a!)
16756 at::ScalarType storage_scalar_type;
16757 bool is_typed_storage = true;
16758 at::Storage storage = _r.storage(0, storage_scalar_type, is_typed_storage);
16759 TORCH_CHECK(storage_scalar_type == self.dtype() || !is_typed_storage,
16760 "Expected a Storage of type ", self.dtype(),
16761 " or an UntypedStorage, but got type ", storage_scalar_type,
16762 " for argument 1 'storage'");
16763 auto dispatch_set_ = [](const Tensor& self, Storage source) -> Tensor {
16764 pybind11::gil_scoped_release no_gil;
16765 return self.set_(source);
16766 };
16767 return wrap(dispatch_set_(self, storage));
16768 }
16769 case 2: {
16770 // aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage
16771 // source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)
16772 at::ScalarType storage_scalar_type;
16773 bool is_typed_storage = true;
16774 at::Storage storage = _r.storage(0, storage_scalar_type, is_typed_storage);
16775 TORCH_CHECK(storage_scalar_type == self.dtype() || !is_typed_storage,
16776 "Expected a Storage of type ", self.dtype(),
16777 " or an UntypedStorage, but got type ", storage_scalar_type,
16778 " for argument 1 'storage'");
16779 auto dispatch_set_ = [](const Tensor& self,
16780 Storage source,
16781 c10::SymInt storage_offset,
16782 c10::SymIntArrayRef size,
16783 c10::SymIntArrayRef stride) -> Tensor {
16784 pybind11::gil_scoped_release no_gil;
16785 return self.set__symint(source, storage_offset, size, stride);
16786 };
16787 return wrap(dispatch_set_(
16788 self, storage, _r.toSymInt(1), _r.symintlist(2), _r.symintlist(3)));
16789 }
16790 case 3: {
16791 // aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
16792 auto dispatch_set_ = [](const Tensor& self, const Tensor& source) -> Tensor {
16793 TORCH_CHECK(source.dtype() == self.dtype(), "Could not set tensor of type ", source.dtype(), " to a tensor of type ", self.dtype());
16794 pybind11::gil_scoped_release no_gil;
16795 return self.set_(source);
16796 };
16797 return wrap(dispatch_set_(self, _r.tensor(0)));
16798 }
16799 case 4: {
16800 // aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor
16801 // source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)
16802 at::Tensor storage = _r.tensor(0);
16803 auto dispatch_set_ = [](const Tensor& self,
16804 const Tensor& source,
16805 c10::SymInt storage_offset,
16806 c10::SymIntArrayRef size,
16807 c10::SymIntArrayRef stride) -> Tensor {
16808 pybind11::gil_scoped_release no_gil;
16809 return self.set__symint(source, storage_offset, size, stride);
16810 };
16811 return wrap(dispatch_set_(
16812 self, storage, _r.toSymInt(1), _r.symintlist(2), _r.symintlist(3)));
16813 }
16814 }
16815 Py_RETURN_NONE;
16816 END_HANDLE_TH_ERRORS
16817}
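// Illustrative (editorial) note on set_ usage from Python (not exhaustive):
//   t.set_(other)               # t now shares other's storage and metadata
//   t.set_(storage, 0, (2, 3))  # storage with explicit offset and size
// The TORCH_CHECKs above reject typed storages (and source tensors) whose
// dtype differs from t's.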
16818
// XXX: ops that are bound here are not exposed to the C++ API nor the JIT.
// Any new op added here should be accompanied by a comment explaining why it
// is not registered through native_functions.yaml, and should be tagged cpp / JIT.
16822PyMethodDef variable_methods[] = {
  // These magic methods are wrapped with TypeError_to_NotImplemented_ so that
  // a TypeError raised by the underlying op is converted into returning
  // NotImplemented, as required by Python's binary-operator protocol
  {"__add__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__radd__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__iadd__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rmul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__sub__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_sub>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__isub__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_sub_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__div__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__truediv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__floordiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_floor_divide>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__idiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ifloordiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_floor_divide_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mod__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_remainder>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imod__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_remainder_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__eq__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_eq>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ne__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_ne>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__lt__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_lt>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__le__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_le>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__gt__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_gt>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ge__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_ge>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rand__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_and>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ror__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_or>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rxor__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_xor>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__bool__", THPVariable_bool_scalar, METH_NOARGS, NULL},
  {"__float__", THPVariable_float_scalar, METH_NOARGS, NULL},
  {"__complex__", THPVariable_complex_scalar, METH_NOARGS, NULL},
  {"__int__", THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__long__", THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__index__", THPVariable_index_scalar, METH_NOARGS, NULL},
  {"__nonzero__", THPVariable_bool_scalar, METH_NOARGS, NULL},
  {"__invert__", THPVariable_invert, METH_NOARGS, NULL},
  {"__matmul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_matmul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_is_view", THPVariable__is_view, METH_NOARGS, NULL},
  {"apply_", THPVariable_apply_, METH_O, NULL},
  {"bfloat16", castPyCFunctionWithKeywords(THPVariable_bfloat16), METH_VARARGS | METH_KEYWORDS, NULL},
  {"byte", castPyCFunctionWithKeywords(THPVariable_byte), METH_VARARGS | METH_KEYWORDS, NULL},
  {"char", castPyCFunctionWithKeywords(THPVariable_char), METH_VARARGS | METH_KEYWORDS, NULL},
  {"contiguous", castPyCFunctionWithKeywords(THPVariable_contiguous), METH_VARARGS | METH_KEYWORDS, NULL},
  {"copy_", castPyCFunctionWithKeywords(THPVariable_copy_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cpu", castPyCFunctionWithKeywords(THPVariable_cpu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cuda", castPyCFunctionWithKeywords(THPVariable_cuda), METH_VARARGS | METH_KEYWORDS, NULL},
  {"xpu", castPyCFunctionWithKeywords(THPVariable_xpu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ipu", castPyCFunctionWithKeywords(THPVariable_ipu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"data_ptr", THPVariable_data_ptr, METH_NOARGS, NULL},
  {"dim", THPVariable_dim, METH_NOARGS, NULL},
  {"has_names", THPVariable_has_names, METH_NOARGS, NULL},
  {"double", castPyCFunctionWithKeywords(THPVariable_double), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cdouble", castPyCFunctionWithKeywords(THPVariable_cdouble), METH_VARARGS | METH_KEYWORDS, NULL},
  {"element_size", THPVariable_element_size, METH_NOARGS, NULL},
  {"float", castPyCFunctionWithKeywords(THPVariable_float), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cfloat", castPyCFunctionWithKeywords(THPVariable_cfloat), METH_VARARGS | METH_KEYWORDS, NULL},
  {"get_device", THPVariable_get_device, METH_NOARGS, NULL},
  {"bool", castPyCFunctionWithKeywords(THPVariable_bool), METH_VARARGS | METH_KEYWORDS, NULL},
  {"half", castPyCFunctionWithKeywords(THPVariable_half), METH_VARARGS | METH_KEYWORDS, NULL},
  {"int", castPyCFunctionWithKeywords(THPVariable_int), METH_VARARGS | METH_KEYWORDS, NULL},
  {"is_contiguous", castPyCFunctionWithKeywords(THPVariable_is_contiguous), METH_VARARGS | METH_KEYWORDS, NULL},
  {"item", THPVariable_item, METH_NOARGS, NULL},
  {"long", castPyCFunctionWithKeywords(THPVariable_long), METH_VARARGS | METH_KEYWORDS, NULL},
  {"map_", castPyCFunctionWithKeywords(THPVariable_map_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"map2_", castPyCFunctionWithKeywords(THPVariable_map2_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ndimension", THPVariable_dim, METH_NOARGS, NULL},
  {"nelement", THPVariable_numel, METH_NOARGS, NULL},
  {"new", castPyCFunctionWithKeywords(THPVariable_new), METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_tensor", castPyCFunctionWithKeywords(THPVariable_new_tensor), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nonzero", castPyCFunctionWithKeywords(THPVariable_nonzero), METH_VARARGS | METH_KEYWORDS, NULL},
  {"numel", THPVariable_numel, METH_NOARGS, NULL},
  {"numpy", castPyCFunctionWithKeywords(THPVariable_numpy), METH_VARARGS | METH_KEYWORDS, NULL},
  {"requires_grad_", castPyCFunctionWithKeywords(THPVariable_requires_grad_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"set_", castPyCFunctionWithKeywords(THPVariable_set_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"short", castPyCFunctionWithKeywords(THPVariable_short), METH_VARARGS | METH_KEYWORDS, NULL},
  {"size", castPyCFunctionWithKeywords(THPVariable_size), METH_VARARGS | METH_KEYWORDS, NULL},
  {"untyped_storage", THPVariable_storage, METH_NOARGS, NULL},
  {"storage_offset", THPVariable_storage_offset, METH_NOARGS, NULL},
  {"stride", castPyCFunctionWithKeywords(THPVariable_stride), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to", castPyCFunctionWithKeywords(THPVariable_to), METH_VARARGS | METH_KEYWORDS, NULL},
  {"tolist", THPVariable_tolist, METH_NOARGS, NULL},
  {"type", castPyCFunctionWithKeywords(THPVariable_type), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__and__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___and__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__iand__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___iand__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ilshift__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___ilshift__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ior__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___ior__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__irshift__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___irshift__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ixor__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___ixor__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__lshift__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___lshift__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__or__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___or__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rshift__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___rshift__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__xor__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___xor__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_addmm_activation", castPyCFunctionWithKeywords(THPVariable__addmm_activation), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_autocast_to_full_precision", castPyCFunctionWithKeywords(THPVariable__autocast_to_full_precision), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_autocast_to_reduced_precision", castPyCFunctionWithKeywords(THPVariable__autocast_to_reduced_precision), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_coalesced_", castPyCFunctionWithKeywords(THPVariable__coalesced_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_conj", (THPVariable__conj), METH_NOARGS, NULL},
  {"_conj_physical", (THPVariable__conj_physical), METH_NOARGS, NULL},
  {"_dimI", (THPVariable__dimI), METH_NOARGS, NULL},
  {"_dimV", (THPVariable__dimV), METH_NOARGS, NULL},
  {"_indices", (THPVariable__indices), METH_NOARGS, NULL},
  {"_is_all_true", (THPVariable__is_all_true), METH_NOARGS, NULL},
  {"_is_any_true", (THPVariable__is_any_true), METH_NOARGS, NULL},
  {"_is_zerotensor", (THPVariable__is_zerotensor), METH_NOARGS, NULL},
  {"_neg_view", (THPVariable__neg_view), METH_NOARGS, NULL},
  {"_nested_tensor_size", (THPVariable__nested_tensor_size), METH_NOARGS, NULL},
  {"_nested_tensor_strides", (THPVariable__nested_tensor_strides), METH_NOARGS, NULL},
  {"_nnz", (THPVariable__nnz), METH_NOARGS, NULL},
  {"_to_dense", castPyCFunctionWithKeywords(THPVariable__to_dense), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_values", (THPVariable__values), METH_NOARGS, NULL},
  {"abs", (THPVariable_abs), METH_NOARGS, NULL},
  {"abs_", (THPVariable_abs_), METH_NOARGS, NULL},
  {"absolute", (THPVariable_absolute), METH_NOARGS, NULL},
  {"absolute_", (THPVariable_absolute_), METH_NOARGS, NULL},
  {"acos", (THPVariable_acos), METH_NOARGS, NULL},
  {"acos_", (THPVariable_acos_), METH_NOARGS, NULL},
  {"acosh", (THPVariable_acosh), METH_NOARGS, NULL},
  {"acosh_", (THPVariable_acosh_), METH_NOARGS, NULL},
  {"add", castPyCFunctionWithKeywords(THPVariable_add), METH_VARARGS | METH_KEYWORDS, NULL},
  {"add_", castPyCFunctionWithKeywords(THPVariable_add_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addbmm", castPyCFunctionWithKeywords(THPVariable_addbmm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addbmm_", castPyCFunctionWithKeywords(THPVariable_addbmm_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addcdiv", castPyCFunctionWithKeywords(THPVariable_addcdiv), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addcdiv_", castPyCFunctionWithKeywords(THPVariable_addcdiv_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addcmul", castPyCFunctionWithKeywords(THPVariable_addcmul), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addcmul_", castPyCFunctionWithKeywords(THPVariable_addcmul_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addmm", castPyCFunctionWithKeywords(THPVariable_addmm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addmm_", castPyCFunctionWithKeywords(THPVariable_addmm_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addmv", castPyCFunctionWithKeywords(THPVariable_addmv), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addmv_", castPyCFunctionWithKeywords(THPVariable_addmv_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addr", castPyCFunctionWithKeywords(THPVariable_addr), METH_VARARGS | METH_KEYWORDS, NULL},
  {"addr_", castPyCFunctionWithKeywords(THPVariable_addr_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"adjoint", (THPVariable_adjoint), METH_NOARGS, NULL},
  {"align_as", castPyCFunctionWithKeywords(THPVariable_align_as), METH_VARARGS | METH_KEYWORDS, NULL},
  {"align_to", castPyCFunctionWithKeywords(THPVariable_align_to), METH_VARARGS | METH_KEYWORDS, NULL},
  {"all", castPyCFunctionWithKeywords(THPVariable_all), METH_VARARGS | METH_KEYWORDS, NULL},
  {"allclose", castPyCFunctionWithKeywords(THPVariable_allclose), METH_VARARGS | METH_KEYWORDS, NULL},
  {"amax", castPyCFunctionWithKeywords(THPVariable_amax), METH_VARARGS | METH_KEYWORDS, NULL},
  {"amin", castPyCFunctionWithKeywords(THPVariable_amin), METH_VARARGS | METH_KEYWORDS, NULL},
  {"aminmax", castPyCFunctionWithKeywords(THPVariable_aminmax), METH_VARARGS | METH_KEYWORDS, NULL},
  {"angle", (THPVariable_angle), METH_NOARGS, NULL},
  {"any", castPyCFunctionWithKeywords(THPVariable_any), METH_VARARGS | METH_KEYWORDS, NULL},
  {"arccos", (THPVariable_arccos), METH_NOARGS, NULL},
  {"arccos_", (THPVariable_arccos_), METH_NOARGS, NULL},
  {"arccosh", (THPVariable_arccosh), METH_NOARGS, NULL},
  {"arccosh_", (THPVariable_arccosh_), METH_NOARGS, NULL},
  {"arcsin", (THPVariable_arcsin), METH_NOARGS, NULL},
  {"arcsin_", (THPVariable_arcsin_), METH_NOARGS, NULL},
  {"arcsinh", (THPVariable_arcsinh), METH_NOARGS, NULL},
  {"arcsinh_", (THPVariable_arcsinh_), METH_NOARGS, NULL},
  {"arctan", (THPVariable_arctan), METH_NOARGS, NULL},
  {"arctan2", castPyCFunctionWithKeywords(THPVariable_arctan2), METH_VARARGS | METH_KEYWORDS, NULL},
  {"arctan2_", castPyCFunctionWithKeywords(THPVariable_arctan2_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"arctan_", (THPVariable_arctan_), METH_NOARGS, NULL},
  {"arctanh", (THPVariable_arctanh), METH_NOARGS, NULL},
  {"arctanh_", (THPVariable_arctanh_), METH_NOARGS, NULL},
  {"argmax", castPyCFunctionWithKeywords(THPVariable_argmax), METH_VARARGS | METH_KEYWORDS, NULL},
  {"argmin", castPyCFunctionWithKeywords(THPVariable_argmin), METH_VARARGS | METH_KEYWORDS, NULL},
  {"argsort", castPyCFunctionWithKeywords(THPVariable_argsort), METH_VARARGS | METH_KEYWORDS, NULL},
  {"argwhere", (THPVariable_argwhere), METH_NOARGS, NULL},
  {"as_strided", castPyCFunctionWithKeywords(THPVariable_as_strided), METH_VARARGS | METH_KEYWORDS, NULL},
  {"as_strided_", castPyCFunctionWithKeywords(THPVariable_as_strided_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"as_strided_scatter", castPyCFunctionWithKeywords(THPVariable_as_strided_scatter), METH_VARARGS | METH_KEYWORDS, NULL},
  {"asin", (THPVariable_asin), METH_NOARGS, NULL},
  {"asin_", (THPVariable_asin_), METH_NOARGS, NULL},
  {"asinh", (THPVariable_asinh), METH_NOARGS, NULL},
  {"asinh_", (THPVariable_asinh_), METH_NOARGS, NULL},
  {"atan", (THPVariable_atan), METH_NOARGS, NULL},
  {"atan2", castPyCFunctionWithKeywords(THPVariable_atan2), METH_VARARGS | METH_KEYWORDS, NULL},
  {"atan2_", castPyCFunctionWithKeywords(THPVariable_atan2_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"atan_", (THPVariable_atan_), METH_NOARGS, NULL},
  {"atanh", (THPVariable_atanh), METH_NOARGS, NULL},
  {"atanh_", (THPVariable_atanh_), METH_NOARGS, NULL},
  {"baddbmm", castPyCFunctionWithKeywords(THPVariable_baddbmm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"baddbmm_", castPyCFunctionWithKeywords(THPVariable_baddbmm_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bernoulli", castPyCFunctionWithKeywords(THPVariable_bernoulli), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bernoulli_", castPyCFunctionWithKeywords(THPVariable_bernoulli_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bincount", castPyCFunctionWithKeywords(THPVariable_bincount), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_and", castPyCFunctionWithKeywords(THPVariable_bitwise_and), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_and_", castPyCFunctionWithKeywords(THPVariable_bitwise_and_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_left_shift", castPyCFunctionWithKeywords(THPVariable_bitwise_left_shift), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_left_shift_", castPyCFunctionWithKeywords(THPVariable_bitwise_left_shift_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_not", (THPVariable_bitwise_not), METH_NOARGS, NULL},
  {"bitwise_not_", (THPVariable_bitwise_not_), METH_NOARGS, NULL},
  {"bitwise_or", castPyCFunctionWithKeywords(THPVariable_bitwise_or), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_or_", castPyCFunctionWithKeywords(THPVariable_bitwise_or_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_right_shift", castPyCFunctionWithKeywords(THPVariable_bitwise_right_shift), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_right_shift_", castPyCFunctionWithKeywords(THPVariable_bitwise_right_shift_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_xor", castPyCFunctionWithKeywords(THPVariable_bitwise_xor), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bitwise_xor_", castPyCFunctionWithKeywords(THPVariable_bitwise_xor_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"bmm", castPyCFunctionWithKeywords(THPVariable_bmm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"broadcast_to", castPyCFunctionWithKeywords(THPVariable_broadcast_to), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cauchy_", castPyCFunctionWithKeywords(THPVariable_cauchy_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ccol_indices", (THPVariable_ccol_indices), METH_NOARGS, NULL},
  {"ceil", (THPVariable_ceil), METH_NOARGS, NULL},
  {"ceil_", (THPVariable_ceil_), METH_NOARGS, NULL},
  {"chalf", castPyCFunctionWithKeywords(THPVariable_chalf), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cholesky", castPyCFunctionWithKeywords(THPVariable_cholesky), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cholesky_inverse", castPyCFunctionWithKeywords(THPVariable_cholesky_inverse), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cholesky_solve", castPyCFunctionWithKeywords(THPVariable_cholesky_solve), METH_VARARGS | METH_KEYWORDS, NULL},
  {"chunk", castPyCFunctionWithKeywords(THPVariable_chunk), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clamp", castPyCFunctionWithKeywords(THPVariable_clamp), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clamp_", castPyCFunctionWithKeywords(THPVariable_clamp_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clamp_max", castPyCFunctionWithKeywords(THPVariable_clamp_max), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clamp_max_", castPyCFunctionWithKeywords(THPVariable_clamp_max_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clamp_min", castPyCFunctionWithKeywords(THPVariable_clamp_min), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clamp_min_", castPyCFunctionWithKeywords(THPVariable_clamp_min_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clip", castPyCFunctionWithKeywords(THPVariable_clip), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clip_", castPyCFunctionWithKeywords(THPVariable_clip_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"clone", castPyCFunctionWithKeywords(THPVariable_clone), METH_VARARGS | METH_KEYWORDS, NULL},
  {"coalesce", (THPVariable_coalesce), METH_NOARGS, NULL},
  {"col_indices", (THPVariable_col_indices), METH_NOARGS, NULL},
  {"conj", (THPVariable_conj), METH_NOARGS, NULL},
  {"conj_physical", (THPVariable_conj_physical), METH_NOARGS, NULL},
  {"conj_physical_", (THPVariable_conj_physical_), METH_NOARGS, NULL},
  {"copysign", castPyCFunctionWithKeywords(THPVariable_copysign), METH_VARARGS | METH_KEYWORDS, NULL},
  {"copysign_", castPyCFunctionWithKeywords(THPVariable_copysign_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"corrcoef", (THPVariable_corrcoef), METH_NOARGS, NULL},
  {"cos", (THPVariable_cos), METH_NOARGS, NULL},
  {"cos_", (THPVariable_cos_), METH_NOARGS, NULL},
  {"cosh", (THPVariable_cosh), METH_NOARGS, NULL},
  {"cosh_", (THPVariable_cosh_), METH_NOARGS, NULL},
  {"count_nonzero", castPyCFunctionWithKeywords(THPVariable_count_nonzero), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cov", castPyCFunctionWithKeywords(THPVariable_cov), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cross", castPyCFunctionWithKeywords(THPVariable_cross), METH_VARARGS | METH_KEYWORDS, NULL},
  {"crow_indices", (THPVariable_crow_indices), METH_NOARGS, NULL},
  {"cummax", castPyCFunctionWithKeywords(THPVariable_cummax), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cummin", castPyCFunctionWithKeywords(THPVariable_cummin), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cumprod", castPyCFunctionWithKeywords(THPVariable_cumprod), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cumprod_", castPyCFunctionWithKeywords(THPVariable_cumprod_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cumsum", castPyCFunctionWithKeywords(THPVariable_cumsum), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cumsum_", castPyCFunctionWithKeywords(THPVariable_cumsum_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"deg2rad", (THPVariable_deg2rad), METH_NOARGS, NULL},
  {"deg2rad_", (THPVariable_deg2rad_), METH_NOARGS, NULL},
  {"dense_dim", (THPVariable_dense_dim), METH_NOARGS, NULL},
  {"dequantize", (THPVariable_dequantize), METH_NOARGS, NULL},
  {"det", (THPVariable_det), METH_NOARGS, NULL},
  {"detach", (THPVariable_detach), METH_NOARGS, NULL},
  {"detach_", (THPVariable_detach_), METH_NOARGS, NULL},
  {"diag", castPyCFunctionWithKeywords(THPVariable_diag), METH_VARARGS | METH_KEYWORDS, NULL},
  {"diag_embed", castPyCFunctionWithKeywords(THPVariable_diag_embed), METH_VARARGS | METH_KEYWORDS, NULL},
  {"diagflat", castPyCFunctionWithKeywords(THPVariable_diagflat), METH_VARARGS | METH_KEYWORDS, NULL},
  {"diagonal", castPyCFunctionWithKeywords(THPVariable_diagonal), METH_VARARGS | METH_KEYWORDS, NULL},
  {"diagonal_scatter", castPyCFunctionWithKeywords(THPVariable_diagonal_scatter), METH_VARARGS | METH_KEYWORDS, NULL},
  {"diff", castPyCFunctionWithKeywords(THPVariable_diff), METH_VARARGS | METH_KEYWORDS, NULL},
  {"digamma", (THPVariable_digamma), METH_NOARGS, NULL},
  {"digamma_", (THPVariable_digamma_), METH_NOARGS, NULL},
  {"dist", castPyCFunctionWithKeywords(THPVariable_dist), METH_VARARGS | METH_KEYWORDS, NULL},
  {"div", castPyCFunctionWithKeywords(THPVariable_div), METH_VARARGS | METH_KEYWORDS, NULL},
  {"div_", castPyCFunctionWithKeywords(THPVariable_div_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"divide", castPyCFunctionWithKeywords(THPVariable_divide), METH_VARARGS | METH_KEYWORDS, NULL},
  {"divide_", castPyCFunctionWithKeywords(THPVariable_divide_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"dot", castPyCFunctionWithKeywords(THPVariable_dot), METH_VARARGS | METH_KEYWORDS, NULL},
  {"dsplit", castPyCFunctionWithKeywords(THPVariable_dsplit), METH_VARARGS | METH_KEYWORDS, NULL},
  {"eq", castPyCFunctionWithKeywords(THPVariable_eq), METH_VARARGS | METH_KEYWORDS, NULL},
  {"eq_", castPyCFunctionWithKeywords(THPVariable_eq_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"equal", castPyCFunctionWithKeywords(THPVariable_equal), METH_VARARGS | METH_KEYWORDS, NULL},
  {"erf", (THPVariable_erf), METH_NOARGS, NULL},
  {"erf_", (THPVariable_erf_), METH_NOARGS, NULL},
  {"erfc", (THPVariable_erfc), METH_NOARGS, NULL},
  {"erfc_", (THPVariable_erfc_), METH_NOARGS, NULL},
  {"erfinv", (THPVariable_erfinv), METH_NOARGS, NULL},
  {"erfinv_", (THPVariable_erfinv_), METH_NOARGS, NULL},
  {"exp", (THPVariable_exp), METH_NOARGS, NULL},
  {"exp2", (THPVariable_exp2), METH_NOARGS, NULL},
  {"exp2_", (THPVariable_exp2_), METH_NOARGS, NULL},
  {"exp_", (THPVariable_exp_), METH_NOARGS, NULL},
  {"expand", castPyCFunctionWithKeywords(THPVariable_expand), METH_VARARGS | METH_KEYWORDS, NULL},
  {"expand_as", castPyCFunctionWithKeywords(THPVariable_expand_as), METH_VARARGS | METH_KEYWORDS, NULL},
  {"expm1", (THPVariable_expm1), METH_NOARGS, NULL},
  {"expm1_", (THPVariable_expm1_), METH_NOARGS, NULL},
  {"exponential_", castPyCFunctionWithKeywords(THPVariable_exponential_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"fill_", castPyCFunctionWithKeywords(THPVariable_fill_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"fill_diagonal_", castPyCFunctionWithKeywords(THPVariable_fill_diagonal_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"fix", (THPVariable_fix), METH_NOARGS, NULL},
  {"fix_", (THPVariable_fix_), METH_NOARGS, NULL},
  {"flatten", castPyCFunctionWithKeywords(THPVariable_flatten), METH_VARARGS | METH_KEYWORDS, NULL},
  {"flip", castPyCFunctionWithKeywords(THPVariable_flip), METH_VARARGS | METH_KEYWORDS, NULL},
  {"fliplr", (THPVariable_fliplr), METH_NOARGS, NULL},
  {"flipud", (THPVariable_flipud), METH_NOARGS, NULL},
  {"float_power", castPyCFunctionWithKeywords(THPVariable_float_power), METH_VARARGS | METH_KEYWORDS, NULL},
  {"float_power_", castPyCFunctionWithKeywords(THPVariable_float_power_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"floor", (THPVariable_floor), METH_NOARGS, NULL},
  {"floor_", (THPVariable_floor_), METH_NOARGS, NULL},
  {"floor_divide", castPyCFunctionWithKeywords(THPVariable_floor_divide), METH_VARARGS | METH_KEYWORDS, NULL},
  {"floor_divide_", castPyCFunctionWithKeywords(THPVariable_floor_divide_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"fmax", castPyCFunctionWithKeywords(THPVariable_fmax), METH_VARARGS | METH_KEYWORDS, NULL},
  {"fmin", castPyCFunctionWithKeywords(THPVariable_fmin), METH_VARARGS | METH_KEYWORDS, NULL},
  {"fmod", castPyCFunctionWithKeywords(THPVariable_fmod), METH_VARARGS | METH_KEYWORDS, NULL},
  {"fmod_", castPyCFunctionWithKeywords(THPVariable_fmod_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"frac", (THPVariable_frac), METH_NOARGS, NULL},
  {"frac_", (THPVariable_frac_), METH_NOARGS, NULL},
  {"frexp", (THPVariable_frexp), METH_NOARGS, NULL},
  {"gather", castPyCFunctionWithKeywords(THPVariable_gather), METH_VARARGS | METH_KEYWORDS, NULL},
  {"gcd", castPyCFunctionWithKeywords(THPVariable_gcd), METH_VARARGS | METH_KEYWORDS, NULL},
  {"gcd_", castPyCFunctionWithKeywords(THPVariable_gcd_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ge", castPyCFunctionWithKeywords(THPVariable_ge), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ge_", castPyCFunctionWithKeywords(THPVariable_ge_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"geometric_", castPyCFunctionWithKeywords(THPVariable_geometric_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"geqrf", (THPVariable_geqrf), METH_NOARGS, NULL},
  {"ger", castPyCFunctionWithKeywords(THPVariable_ger), METH_VARARGS | METH_KEYWORDS, NULL},
  {"greater", castPyCFunctionWithKeywords(THPVariable_greater), METH_VARARGS | METH_KEYWORDS, NULL},
  {"greater_", castPyCFunctionWithKeywords(THPVariable_greater_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"greater_equal", castPyCFunctionWithKeywords(THPVariable_greater_equal), METH_VARARGS | METH_KEYWORDS, NULL},
  {"greater_equal_", castPyCFunctionWithKeywords(THPVariable_greater_equal_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"gt", castPyCFunctionWithKeywords(THPVariable_gt), METH_VARARGS | METH_KEYWORDS, NULL},
  {"gt_", castPyCFunctionWithKeywords(THPVariable_gt_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"hardshrink", castPyCFunctionWithKeywords(THPVariable_hardshrink), METH_VARARGS | METH_KEYWORDS, NULL},
  {"heaviside", castPyCFunctionWithKeywords(THPVariable_heaviside), METH_VARARGS | METH_KEYWORDS, NULL},
  {"heaviside_", castPyCFunctionWithKeywords(THPVariable_heaviside_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"histc", castPyCFunctionWithKeywords(THPVariable_histc), METH_VARARGS | METH_KEYWORDS, NULL},
  {"histogram", castPyCFunctionWithKeywords(THPVariable_histogram), METH_VARARGS | METH_KEYWORDS, NULL},
  {"hsplit", castPyCFunctionWithKeywords(THPVariable_hsplit), METH_VARARGS | METH_KEYWORDS, NULL},
  {"hypot", castPyCFunctionWithKeywords(THPVariable_hypot), METH_VARARGS | METH_KEYWORDS, NULL},
  {"hypot_", castPyCFunctionWithKeywords(THPVariable_hypot_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"i0", (THPVariable_i0), METH_NOARGS, NULL},
  {"i0_", (THPVariable_i0_), METH_NOARGS, NULL},
  {"igamma", castPyCFunctionWithKeywords(THPVariable_igamma), METH_VARARGS | METH_KEYWORDS, NULL},
  {"igamma_", castPyCFunctionWithKeywords(THPVariable_igamma_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"igammac", castPyCFunctionWithKeywords(THPVariable_igammac), METH_VARARGS | METH_KEYWORDS, NULL},
  {"igammac_", castPyCFunctionWithKeywords(THPVariable_igammac_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_add", castPyCFunctionWithKeywords(THPVariable_index_add), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_add_", castPyCFunctionWithKeywords(THPVariable_index_add_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_copy", castPyCFunctionWithKeywords(THPVariable_index_copy), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_copy_", castPyCFunctionWithKeywords(THPVariable_index_copy_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_fill", castPyCFunctionWithKeywords(THPVariable_index_fill), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_fill_", castPyCFunctionWithKeywords(THPVariable_index_fill_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_put", castPyCFunctionWithKeywords(THPVariable_index_put), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_put_", castPyCFunctionWithKeywords(THPVariable_index_put_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_reduce", castPyCFunctionWithKeywords(THPVariable_index_reduce), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_reduce_", castPyCFunctionWithKeywords(THPVariable_index_reduce_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"index_select", castPyCFunctionWithKeywords(THPVariable_index_select), METH_VARARGS | METH_KEYWORDS, NULL},
  {"indices", (THPVariable_indices), METH_NOARGS, NULL},
  {"inner", castPyCFunctionWithKeywords(THPVariable_inner), METH_VARARGS | METH_KEYWORDS, NULL},
  {"int_repr", (THPVariable_int_repr), METH_NOARGS, NULL},
  {"inverse", (THPVariable_inverse), METH_NOARGS, NULL},
  {"is_coalesced", (THPVariable_is_coalesced), METH_NOARGS, NULL},
  {"is_complex", (THPVariable_is_complex), METH_NOARGS, NULL},
  {"is_conj", (THPVariable_is_conj), METH_NOARGS, NULL},
  {"is_distributed", (THPVariable_is_distributed), METH_NOARGS, NULL},
  {"is_floating_point", (THPVariable_is_floating_point), METH_NOARGS, NULL},
  {"is_inference", (THPVariable_is_inference), METH_NOARGS, NULL},
  {"is_neg", (THPVariable_is_neg), METH_NOARGS, NULL},
  {"is_nonzero", (THPVariable_is_nonzero), METH_NOARGS, NULL},
  {"is_pinned", castPyCFunctionWithKeywords(THPVariable_is_pinned), METH_VARARGS | METH_KEYWORDS, NULL},
  {"is_same_size", castPyCFunctionWithKeywords(THPVariable_is_same_size), METH_VARARGS | METH_KEYWORDS, NULL},
  {"is_set_to", castPyCFunctionWithKeywords(THPVariable_is_set_to), METH_VARARGS | METH_KEYWORDS, NULL},
  {"is_signed", (THPVariable_is_signed), METH_NOARGS, NULL},
  {"isclose", castPyCFunctionWithKeywords(THPVariable_isclose), METH_VARARGS | METH_KEYWORDS, NULL},
  {"isfinite", (THPVariable_isfinite), METH_NOARGS, NULL},
  {"isinf", (THPVariable_isinf), METH_NOARGS, NULL},
  {"isnan", (THPVariable_isnan), METH_NOARGS, NULL},
  {"isneginf", (THPVariable_isneginf), METH_NOARGS, NULL},
  {"isposinf", (THPVariable_isposinf), METH_NOARGS, NULL},
  {"isreal", (THPVariable_isreal), METH_NOARGS, NULL},
  {"istft", castPyCFunctionWithKeywords(THPVariable_istft), METH_VARARGS | METH_KEYWORDS, NULL},
  {"kron", castPyCFunctionWithKeywords(THPVariable_kron), METH_VARARGS | METH_KEYWORDS, NULL},
  {"kthvalue", castPyCFunctionWithKeywords(THPVariable_kthvalue), METH_VARARGS | METH_KEYWORDS, NULL},
  {"lcm", castPyCFunctionWithKeywords(THPVariable_lcm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"lcm_", castPyCFunctionWithKeywords(THPVariable_lcm_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ldexp", castPyCFunctionWithKeywords(THPVariable_ldexp), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ldexp_", castPyCFunctionWithKeywords(THPVariable_ldexp_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"le", castPyCFunctionWithKeywords(THPVariable_le), METH_VARARGS | METH_KEYWORDS, NULL},
  {"le_", castPyCFunctionWithKeywords(THPVariable_le_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"lerp", castPyCFunctionWithKeywords(THPVariable_lerp), METH_VARARGS | METH_KEYWORDS, NULL},
  {"lerp_", castPyCFunctionWithKeywords(THPVariable_lerp_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"less", castPyCFunctionWithKeywords(THPVariable_less), METH_VARARGS | METH_KEYWORDS, NULL},
  {"less_", castPyCFunctionWithKeywords(THPVariable_less_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"less_equal", castPyCFunctionWithKeywords(THPVariable_less_equal), METH_VARARGS | METH_KEYWORDS, NULL},
  {"less_equal_", castPyCFunctionWithKeywords(THPVariable_less_equal_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"lgamma", (THPVariable_lgamma), METH_NOARGS, NULL},
  {"lgamma_", (THPVariable_lgamma_), METH_NOARGS, NULL},
  {"log", (THPVariable_log), METH_NOARGS, NULL},
  {"log10", (THPVariable_log10), METH_NOARGS, NULL},
  {"log10_", (THPVariable_log10_), METH_NOARGS, NULL},
  {"log1p", (THPVariable_log1p), METH_NOARGS, NULL},
  {"log1p_", (THPVariable_log1p_), METH_NOARGS, NULL},
  {"log2", (THPVariable_log2), METH_NOARGS, NULL},
  {"log2_", (THPVariable_log2_), METH_NOARGS, NULL},
  {"log_", (THPVariable_log_), METH_NOARGS, NULL},
  {"log_normal_", castPyCFunctionWithKeywords(THPVariable_log_normal_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"log_softmax", castPyCFunctionWithKeywords(THPVariable_log_softmax), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logaddexp", castPyCFunctionWithKeywords(THPVariable_logaddexp), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logaddexp2", castPyCFunctionWithKeywords(THPVariable_logaddexp2), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logcumsumexp", castPyCFunctionWithKeywords(THPVariable_logcumsumexp), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logdet", (THPVariable_logdet), METH_NOARGS, NULL},
  {"logical_and", castPyCFunctionWithKeywords(THPVariable_logical_and), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logical_and_", castPyCFunctionWithKeywords(THPVariable_logical_and_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logical_not", (THPVariable_logical_not), METH_NOARGS, NULL},
  {"logical_not_", (THPVariable_logical_not_), METH_NOARGS, NULL},
  {"logical_or", castPyCFunctionWithKeywords(THPVariable_logical_or), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logical_or_", castPyCFunctionWithKeywords(THPVariable_logical_or_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logical_xor", castPyCFunctionWithKeywords(THPVariable_logical_xor), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logical_xor_", castPyCFunctionWithKeywords(THPVariable_logical_xor_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logit", castPyCFunctionWithKeywords(THPVariable_logit), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logit_", castPyCFunctionWithKeywords(THPVariable_logit_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"logsumexp", castPyCFunctionWithKeywords(THPVariable_logsumexp), METH_VARARGS | METH_KEYWORDS, NULL},
  {"lt", castPyCFunctionWithKeywords(THPVariable_lt), METH_VARARGS | METH_KEYWORDS, NULL},
  {"lt_", castPyCFunctionWithKeywords(THPVariable_lt_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"lu_solve", castPyCFunctionWithKeywords(THPVariable_lu_solve), METH_VARARGS | METH_KEYWORDS, NULL},
  {"masked_fill", castPyCFunctionWithKeywords(THPVariable_masked_fill), METH_VARARGS | METH_KEYWORDS, NULL},
  {"masked_fill_", castPyCFunctionWithKeywords(THPVariable_masked_fill_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"masked_scatter", castPyCFunctionWithKeywords(THPVariable_masked_scatter), METH_VARARGS | METH_KEYWORDS, NULL},
  {"masked_scatter_", castPyCFunctionWithKeywords(THPVariable_masked_scatter_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"masked_select", castPyCFunctionWithKeywords(THPVariable_masked_select), METH_VARARGS | METH_KEYWORDS, NULL},
  {"matmul", castPyCFunctionWithKeywords(THPVariable_matmul), METH_VARARGS | METH_KEYWORDS, NULL},
  {"matrix_exp", (THPVariable_matrix_exp), METH_NOARGS, NULL},
  {"matrix_power", castPyCFunctionWithKeywords(THPVariable_matrix_power), METH_VARARGS | METH_KEYWORDS, NULL},
  {"max", castPyCFunctionWithKeywords(THPVariable_max), METH_VARARGS | METH_KEYWORDS, NULL},
  {"maximum", castPyCFunctionWithKeywords(THPVariable_maximum), METH_VARARGS | METH_KEYWORDS, NULL},
  {"mean", castPyCFunctionWithKeywords(THPVariable_mean), METH_VARARGS | METH_KEYWORDS, NULL},
  {"median", castPyCFunctionWithKeywords(THPVariable_median), METH_VARARGS | METH_KEYWORDS, NULL},
  {"min", castPyCFunctionWithKeywords(THPVariable_min), METH_VARARGS | METH_KEYWORDS, NULL},
  {"minimum", castPyCFunctionWithKeywords(THPVariable_minimum), METH_VARARGS | METH_KEYWORDS, NULL},
  {"mm", castPyCFunctionWithKeywords(THPVariable_mm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"mode", castPyCFunctionWithKeywords(THPVariable_mode), METH_VARARGS | METH_KEYWORDS, NULL},
  {"moveaxis", castPyCFunctionWithKeywords(THPVariable_moveaxis), METH_VARARGS | METH_KEYWORDS, NULL},
  {"movedim", castPyCFunctionWithKeywords(THPVariable_movedim), METH_VARARGS | METH_KEYWORDS, NULL},
  {"msort", (THPVariable_msort), METH_NOARGS, NULL},
  {"mul", castPyCFunctionWithKeywords(THPVariable_mul), METH_VARARGS | METH_KEYWORDS, NULL},
  {"mul_", castPyCFunctionWithKeywords(THPVariable_mul_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"multinomial", castPyCFunctionWithKeywords(THPVariable_multinomial), METH_VARARGS | METH_KEYWORDS, NULL},
  {"multiply", castPyCFunctionWithKeywords(THPVariable_multiply), METH_VARARGS | METH_KEYWORDS, NULL},
  {"multiply_", castPyCFunctionWithKeywords(THPVariable_multiply_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"mv", castPyCFunctionWithKeywords(THPVariable_mv), METH_VARARGS | METH_KEYWORDS, NULL},
  {"mvlgamma", castPyCFunctionWithKeywords(THPVariable_mvlgamma), METH_VARARGS | METH_KEYWORDS, NULL},
  {"mvlgamma_", castPyCFunctionWithKeywords(THPVariable_mvlgamma_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nan_to_num", castPyCFunctionWithKeywords(THPVariable_nan_to_num), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nan_to_num_", castPyCFunctionWithKeywords(THPVariable_nan_to_num_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nanmean", castPyCFunctionWithKeywords(THPVariable_nanmean), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nanmedian", castPyCFunctionWithKeywords(THPVariable_nanmedian), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nanquantile", castPyCFunctionWithKeywords(THPVariable_nanquantile), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nansum", castPyCFunctionWithKeywords(THPVariable_nansum), METH_VARARGS | METH_KEYWORDS, NULL},
  {"narrow", castPyCFunctionWithKeywords(THPVariable_narrow), METH_VARARGS | METH_KEYWORDS, NULL},
  {"narrow_copy", castPyCFunctionWithKeywords(THPVariable_narrow_copy), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ne", castPyCFunctionWithKeywords(THPVariable_ne), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ne_", castPyCFunctionWithKeywords(THPVariable_ne_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"neg", (THPVariable_neg), METH_NOARGS, NULL},
  {"neg_", (THPVariable_neg_), METH_NOARGS, NULL},
  {"negative", (THPVariable_negative), METH_NOARGS, NULL},
  {"negative_", (THPVariable_negative_), METH_NOARGS, NULL},
  {"new_empty", castPyCFunctionWithKeywords(THPVariable_new_empty), METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_empty_strided", castPyCFunctionWithKeywords(THPVariable_new_empty_strided), METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_full", castPyCFunctionWithKeywords(THPVariable_new_full), METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_ones", castPyCFunctionWithKeywords(THPVariable_new_ones), METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_zeros", castPyCFunctionWithKeywords(THPVariable_new_zeros), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nextafter", castPyCFunctionWithKeywords(THPVariable_nextafter), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nextafter_", castPyCFunctionWithKeywords(THPVariable_nextafter_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"norm", castPyCFunctionWithKeywords(THPVariable_norm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"normal_", castPyCFunctionWithKeywords(THPVariable_normal_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"not_equal", castPyCFunctionWithKeywords(THPVariable_not_equal), METH_VARARGS | METH_KEYWORDS, NULL},
  {"not_equal_", castPyCFunctionWithKeywords(THPVariable_not_equal_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"orgqr", castPyCFunctionWithKeywords(THPVariable_orgqr), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ormqr", castPyCFunctionWithKeywords(THPVariable_ormqr), METH_VARARGS | METH_KEYWORDS, NULL},
  {"outer", castPyCFunctionWithKeywords(THPVariable_outer), METH_VARARGS | METH_KEYWORDS, NULL},
  {"permute", castPyCFunctionWithKeywords(THPVariable_permute), METH_VARARGS | METH_KEYWORDS, NULL},
  {"pin_memory", castPyCFunctionWithKeywords(THPVariable_pin_memory), METH_VARARGS | METH_KEYWORDS, NULL},
  {"pinverse", castPyCFunctionWithKeywords(THPVariable_pinverse), METH_VARARGS | METH_KEYWORDS, NULL},
  {"polygamma", castPyCFunctionWithKeywords(THPVariable_polygamma), METH_VARARGS | METH_KEYWORDS, NULL},
  {"polygamma_", castPyCFunctionWithKeywords(THPVariable_polygamma_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"positive", (THPVariable_positive), METH_NOARGS, NULL},
  {"pow", castPyCFunctionWithKeywords(THPVariable_pow), METH_VARARGS | METH_KEYWORDS, NULL},
  {"pow_", castPyCFunctionWithKeywords(THPVariable_pow_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"prelu", castPyCFunctionWithKeywords(THPVariable_prelu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"prod", castPyCFunctionWithKeywords(THPVariable_prod), METH_VARARGS | METH_KEYWORDS, NULL},
  {"put", castPyCFunctionWithKeywords(THPVariable_put), METH_VARARGS | METH_KEYWORDS, NULL},
  {"put_", castPyCFunctionWithKeywords(THPVariable_put_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"q_per_channel_axis", (THPVariable_q_per_channel_axis), METH_NOARGS, NULL},
  {"q_per_channel_scales", (THPVariable_q_per_channel_scales), METH_NOARGS, NULL},
  {"q_per_channel_zero_points", (THPVariable_q_per_channel_zero_points), METH_NOARGS, NULL},
  {"q_scale", (THPVariable_q_scale), METH_NOARGS, NULL},
  {"q_zero_point", (THPVariable_q_zero_point), METH_NOARGS, NULL},
  {"qr", castPyCFunctionWithKeywords(THPVariable_qr), METH_VARARGS | METH_KEYWORDS, NULL},
  {"qscheme", (THPVariable_qscheme), METH_NOARGS, NULL},
  {"quantile", castPyCFunctionWithKeywords(THPVariable_quantile), METH_VARARGS | METH_KEYWORDS, NULL},
  {"rad2deg", (THPVariable_rad2deg), METH_NOARGS, NULL},
  {"rad2deg_", (THPVariable_rad2deg_), METH_NOARGS, NULL},
  {"random_", castPyCFunctionWithKeywords(THPVariable_random_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ravel", (THPVariable_ravel), METH_NOARGS, NULL},
  {"reciprocal", (THPVariable_reciprocal), METH_NOARGS, NULL},
  {"reciprocal_", (THPVariable_reciprocal_), METH_NOARGS, NULL},
  {"record_stream", castPyCFunctionWithKeywords(THPVariable_record_stream), METH_VARARGS | METH_KEYWORDS, NULL},
  {"refine_names", castPyCFunctionWithKeywords(THPVariable_refine_names), METH_VARARGS | METH_KEYWORDS, NULL},
  {"relu", (THPVariable_relu), METH_NOARGS, NULL},
  {"relu_", (THPVariable_relu_), METH_NOARGS, NULL},
  {"remainder", castPyCFunctionWithKeywords(THPVariable_remainder), METH_VARARGS | METH_KEYWORDS, NULL},
  {"remainder_", castPyCFunctionWithKeywords(THPVariable_remainder_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"rename", castPyCFunctionWithKeywords(THPVariable_rename), METH_VARARGS | METH_KEYWORDS, NULL},
  {"rename_", castPyCFunctionWithKeywords(THPVariable_rename_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"renorm", castPyCFunctionWithKeywords(THPVariable_renorm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"renorm_", castPyCFunctionWithKeywords(THPVariable_renorm_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"repeat", castPyCFunctionWithKeywords(THPVariable_repeat), METH_VARARGS | METH_KEYWORDS, NULL},
  {"repeat_interleave", castPyCFunctionWithKeywords(THPVariable_repeat_interleave), METH_VARARGS | METH_KEYWORDS, NULL},
  {"reshape", castPyCFunctionWithKeywords(THPVariable_reshape), METH_VARARGS | METH_KEYWORDS, NULL},
  {"reshape_as", castPyCFunctionWithKeywords(THPVariable_reshape_as), METH_VARARGS | METH_KEYWORDS, NULL},
  {"resize_", castPyCFunctionWithKeywords(THPVariable_resize_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"resize_as_", castPyCFunctionWithKeywords(THPVariable_resize_as_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"resize_as_sparse_", castPyCFunctionWithKeywords(THPVariable_resize_as_sparse_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"resolve_conj", (THPVariable_resolve_conj), METH_NOARGS, NULL},
  {"resolve_neg", (THPVariable_resolve_neg), METH_NOARGS, NULL},
  {"retain_grad", (THPVariable_retain_grad), METH_NOARGS, NULL},
  {"roll", castPyCFunctionWithKeywords(THPVariable_roll), METH_VARARGS | METH_KEYWORDS, NULL},
  {"rot90", castPyCFunctionWithKeywords(THPVariable_rot90), METH_VARARGS | METH_KEYWORDS, NULL},
  {"round", castPyCFunctionWithKeywords(THPVariable_round), METH_VARARGS | METH_KEYWORDS, NULL},
  {"round_", castPyCFunctionWithKeywords(THPVariable_round_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"row_indices", (THPVariable_row_indices), METH_NOARGS, NULL},
  {"rsqrt", (THPVariable_rsqrt), METH_NOARGS, NULL},
  {"rsqrt_", (THPVariable_rsqrt_), METH_NOARGS, NULL},
  {"scatter", castPyCFunctionWithKeywords(THPVariable_scatter), METH_VARARGS | METH_KEYWORDS, NULL},
  {"scatter_", castPyCFunctionWithKeywords(THPVariable_scatter_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"scatter_add", castPyCFunctionWithKeywords(THPVariable_scatter_add), METH_VARARGS | METH_KEYWORDS, NULL},
  {"scatter_add_", castPyCFunctionWithKeywords(THPVariable_scatter_add_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"scatter_reduce", castPyCFunctionWithKeywords(THPVariable_scatter_reduce), METH_VARARGS | METH_KEYWORDS, NULL},
  {"scatter_reduce_", castPyCFunctionWithKeywords(THPVariable_scatter_reduce_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"select", castPyCFunctionWithKeywords(THPVariable_select), METH_VARARGS | METH_KEYWORDS, NULL},
  {"select_scatter", castPyCFunctionWithKeywords(THPVariable_select_scatter), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sgn", (THPVariable_sgn), METH_NOARGS, NULL},
  {"sgn_", (THPVariable_sgn_), METH_NOARGS, NULL},
  {"sigmoid", (THPVariable_sigmoid), METH_NOARGS, NULL},
  {"sigmoid_", (THPVariable_sigmoid_), METH_NOARGS, NULL},
  {"sign", (THPVariable_sign), METH_NOARGS, NULL},
  {"sign_", (THPVariable_sign_), METH_NOARGS, NULL},
  {"signbit", (THPVariable_signbit), METH_NOARGS, NULL},
  {"sin", (THPVariable_sin), METH_NOARGS, NULL},
  {"sin_", (THPVariable_sin_), METH_NOARGS, NULL},
  {"sinc", (THPVariable_sinc), METH_NOARGS, NULL},
  {"sinc_", (THPVariable_sinc_), METH_NOARGS, NULL},
  {"sinh", (THPVariable_sinh), METH_NOARGS, NULL},
  {"sinh_", (THPVariable_sinh_), METH_NOARGS, NULL},
  {"slice_scatter", castPyCFunctionWithKeywords(THPVariable_slice_scatter), METH_VARARGS | METH_KEYWORDS, NULL},
  {"slogdet", (THPVariable_slogdet), METH_NOARGS, NULL},
  {"smm", castPyCFunctionWithKeywords(THPVariable_smm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"softmax", castPyCFunctionWithKeywords(THPVariable_softmax), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sort", castPyCFunctionWithKeywords(THPVariable_sort), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sparse_dim", (THPVariable_sparse_dim), METH_NOARGS, NULL},
  {"sparse_mask", castPyCFunctionWithKeywords(THPVariable_sparse_mask), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sparse_resize_", castPyCFunctionWithKeywords(THPVariable_sparse_resize_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sparse_resize_and_clear_", castPyCFunctionWithKeywords(THPVariable_sparse_resize_and_clear_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"split", castPyCFunctionWithKeywords(THPVariable_split), METH_VARARGS | METH_KEYWORDS, NULL},
  {"split_with_sizes", castPyCFunctionWithKeywords(THPVariable_split_with_sizes), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sqrt", (THPVariable_sqrt), METH_NOARGS, NULL},
  {"sqrt_", (THPVariable_sqrt_), METH_NOARGS, NULL},
  {"square", (THPVariable_square), METH_NOARGS, NULL},
  {"square_", (THPVariable_square_), METH_NOARGS, NULL},
  {"squeeze", castPyCFunctionWithKeywords(THPVariable_squeeze), METH_VARARGS | METH_KEYWORDS, NULL},
  {"squeeze_", castPyCFunctionWithKeywords(THPVariable_squeeze_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sspaddmm", castPyCFunctionWithKeywords(THPVariable_sspaddmm), METH_VARARGS | METH_KEYWORDS, NULL},
  {"std", castPyCFunctionWithKeywords(THPVariable_std), METH_VARARGS | METH_KEYWORDS, NULL},
  {"stft", castPyCFunctionWithKeywords(THPVariable_stft), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sub", castPyCFunctionWithKeywords(THPVariable_sub), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sub_", castPyCFunctionWithKeywords(THPVariable_sub_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"subtract", castPyCFunctionWithKeywords(THPVariable_subtract), METH_VARARGS | METH_KEYWORDS, NULL},
  {"subtract_", castPyCFunctionWithKeywords(THPVariable_subtract_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sum", castPyCFunctionWithKeywords(THPVariable_sum), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sum_to_size", castPyCFunctionWithKeywords(THPVariable_sum_to_size), METH_VARARGS | METH_KEYWORDS, NULL},
  {"svd", castPyCFunctionWithKeywords(THPVariable_svd), METH_VARARGS | METH_KEYWORDS, NULL},
  {"swapaxes", castPyCFunctionWithKeywords(THPVariable_swapaxes), METH_VARARGS | METH_KEYWORDS, NULL},
  {"swapaxes_", castPyCFunctionWithKeywords(THPVariable_swapaxes_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"swapdims", castPyCFunctionWithKeywords(THPVariable_swapdims), METH_VARARGS | METH_KEYWORDS, NULL},
  {"swapdims_", castPyCFunctionWithKeywords(THPVariable_swapdims_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"t", (THPVariable_t), METH_NOARGS, NULL},
  {"t_", (THPVariable_t_), METH_NOARGS, NULL},
  {"take", castPyCFunctionWithKeywords(THPVariable_take), METH_VARARGS | METH_KEYWORDS, NULL},
  {"take_along_dim", castPyCFunctionWithKeywords(THPVariable_take_along_dim), METH_VARARGS | METH_KEYWORDS, NULL},
  {"tan", (THPVariable_tan), METH_NOARGS, NULL},
  {"tan_", (THPVariable_tan_), METH_NOARGS, NULL},
  {"tanh", (THPVariable_tanh), METH_NOARGS, NULL},
  {"tanh_", (THPVariable_tanh_), METH_NOARGS, NULL},
  {"tensor_split", castPyCFunctionWithKeywords(THPVariable_tensor_split), METH_VARARGS | METH_KEYWORDS, NULL},
  {"tile", castPyCFunctionWithKeywords(THPVariable_tile), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to_dense", castPyCFunctionWithKeywords(THPVariable_to_dense), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to_mkldnn", castPyCFunctionWithKeywords(THPVariable_to_mkldnn), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to_padded_tensor", castPyCFunctionWithKeywords(THPVariable_to_padded_tensor), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to_sparse", castPyCFunctionWithKeywords(THPVariable_to_sparse), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to_sparse_bsc", castPyCFunctionWithKeywords(THPVariable_to_sparse_bsc), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to_sparse_bsr", castPyCFunctionWithKeywords(THPVariable_to_sparse_bsr), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to_sparse_csc", castPyCFunctionWithKeywords(THPVariable_to_sparse_csc), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to_sparse_csr", castPyCFunctionWithKeywords(THPVariable_to_sparse_csr), METH_VARARGS | METH_KEYWORDS, NULL},
  {"topk", castPyCFunctionWithKeywords(THPVariable_topk), METH_VARARGS | METH_KEYWORDS, NULL},
  {"trace", (THPVariable_trace), METH_NOARGS, NULL},
  {"transpose", castPyCFunctionWithKeywords(THPVariable_transpose), METH_VARARGS | METH_KEYWORDS, NULL},
  {"transpose_", castPyCFunctionWithKeywords(THPVariable_transpose_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"triangular_solve", castPyCFunctionWithKeywords(THPVariable_triangular_solve), METH_VARARGS | METH_KEYWORDS, NULL},
  {"tril", castPyCFunctionWithKeywords(THPVariable_tril), METH_VARARGS | METH_KEYWORDS, NULL},
  {"tril_", castPyCFunctionWithKeywords(THPVariable_tril_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"triu", castPyCFunctionWithKeywords(THPVariable_triu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"triu_", castPyCFunctionWithKeywords(THPVariable_triu_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"true_divide", castPyCFunctionWithKeywords(THPVariable_true_divide), METH_VARARGS | METH_KEYWORDS, NULL},
  {"true_divide_", castPyCFunctionWithKeywords(THPVariable_true_divide_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"trunc", (THPVariable_trunc), METH_NOARGS, NULL},
  {"trunc_", (THPVariable_trunc_), METH_NOARGS, NULL},
  {"type_as", castPyCFunctionWithKeywords(THPVariable_type_as), METH_VARARGS | METH_KEYWORDS, NULL},
  {"unbind", castPyCFunctionWithKeywords(THPVariable_unbind), METH_VARARGS | METH_KEYWORDS, NULL},
  {"unflatten", castPyCFunctionWithKeywords(THPVariable_unflatten), METH_VARARGS | METH_KEYWORDS, NULL},
  {"unfold", castPyCFunctionWithKeywords(THPVariable_unfold), METH_VARARGS | METH_KEYWORDS, NULL},
  {"uniform_", castPyCFunctionWithKeywords(THPVariable_uniform_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"unsafe_chunk", castPyCFunctionWithKeywords(THPVariable_unsafe_chunk), METH_VARARGS | METH_KEYWORDS, NULL},
  {"unsafe_split", castPyCFunctionWithKeywords(THPVariable_unsafe_split), METH_VARARGS | METH_KEYWORDS, NULL},
  {"unsafe_split_with_sizes", castPyCFunctionWithKeywords(THPVariable_unsafe_split_with_sizes), METH_VARARGS | METH_KEYWORDS, NULL},
  {"unsqueeze", castPyCFunctionWithKeywords(THPVariable_unsqueeze), METH_VARARGS | METH_KEYWORDS, NULL},
  {"unsqueeze_", castPyCFunctionWithKeywords(THPVariable_unsqueeze_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"values", (THPVariable_values), METH_NOARGS, NULL},
  {"var", castPyCFunctionWithKeywords(THPVariable_var), METH_VARARGS | METH_KEYWORDS, NULL},
  {"vdot", castPyCFunctionWithKeywords(THPVariable_vdot), METH_VARARGS | METH_KEYWORDS, NULL},
  {"view", castPyCFunctionWithKeywords(THPVariable_view), METH_VARARGS | METH_KEYWORDS, NULL},
  {"view_as", castPyCFunctionWithKeywords(THPVariable_view_as), METH_VARARGS | METH_KEYWORDS, NULL},
  {"vsplit", castPyCFunctionWithKeywords(THPVariable_vsplit), METH_VARARGS | METH_KEYWORDS, NULL},
  {"where", castPyCFunctionWithKeywords(THPVariable_where), METH_VARARGS | METH_KEYWORDS, NULL},
  {"xlogy", castPyCFunctionWithKeywords(THPVariable_xlogy), METH_VARARGS | METH_KEYWORDS, NULL},
  {"xlogy_", castPyCFunctionWithKeywords(THPVariable_xlogy_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"zero_", (THPVariable_zero_), METH_NOARGS, NULL},
  {NULL}
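  // sentinel: a zero-filled PyMethodDef entry terminates the table
  // (CPython convention), so keep it last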
};
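
// Editorial note, an assumption about the surrounding (non-generated) code
// rather than something defined in this file: a PyMethodDef table like the
// one above is typically installed on a Python type via its tp_methods slot,
// roughly along these lines (the type name is hypothetical):
//
//   PyTypeObject SomeTensorType = { PyVarObject_HEAD_INIT(nullptr, 0) };
//   // ... fill in tp_name, tp_basicsize, etc. ...
//   SomeTensorType.tp_methods = variable_methods;
//
// In PyTorch, variable_methods is believed to be merged with additional
// method tables before being attached to the tensor type (THPVariableType).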

}} // namespace torch::autograd