1#pragma once
2
3#ifdef TORCH_ASSERT_NO_OPERATORS
4#error This change adds a dependency on native_functions.yaml, \
5 meaning the file will need to be re-compiled every time an operator \
6 is changed or added. Consider if your change would be better placed in \
7 another file, or if a more specific header might achieve the same goal. \
8 See NOTE: [Tensor vs. TensorBase]
9#endif
10
11#include <c10/core/Device.h>
12#include <c10/core/Layout.h>
13#include <c10/core/MemoryFormat.h>
14#include <c10/core/QScheme.h>
15#include <c10/core/Stream.h>
16#include <c10/core/Scalar.h>
17#include <c10/core/ScalarType.h>
18#include <c10/core/ScalarTypeToTypeMeta.h>
19#include <c10/core/Storage.h>
20#include <c10/core/TensorImpl.h>
21#include <c10/core/UndefinedTensorImpl.h>
22#include <c10/core/WrapDimMinimal.h>
23#include <c10/util/Exception.h>
24#include <c10/util/Deprecated.h>
25#include <c10/util/MaybeOwned.h>
26#include <c10/util/Optional.h>
27#include <c10/util/OptionalArrayRef.h>
28#include <c10/util/intrusive_ptr.h>
29#include <c10/macros/Export.h>
30#include <ATen/core/CheckMemoryFormat.h>
31#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
32#include <ATen/core/DeprecatedTypeProperties.h>
33#include <ATen/core/NamedTensor.h>
34#include <ATen/core/QuantizerBase.h>
35#include <c10/core/SymInt.h>
36#include <ATen/core/TensorAccessor.h>
37#include <ATen/core/TensorBase.h>
38
39
40#include <ATen/MethodOperators.h>
41
42namespace c10{
43template<class T> class List;
44template<class T> class IListRef;
45}
46namespace at {
47struct Generator;
48struct Type;
49class DeprecatedTypeProperties;
50class Tensor;
51} // namespace at
52namespace at {
53namespace indexing {
54struct TensorIndex;
55} // namespace indexing
56} // namespace at
57
58namespace torch { namespace autograd {
59
60struct Node;
61
62}} // namespace torch::autograd
63
64namespace at {
65
66class OptionalTensorRef;
67class Tensor;
68using TensorList = ArrayRef<Tensor>;
69using ITensorList = c10::IListRef<Tensor>;
70
71using Stream = c10::Stream;
72
73// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which
74// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr.
75//
76// For example:
77//
78// void func(Tensor a) {
79// Tensor b = a;
80// ...
81// }
82//
83// In this example, when we say Tensor b = a, we are creating a new object that points to the
84// same underlying TensorImpl, and bumps its reference count. When b goes out of scope, the
85// destructor decrements the reference count by calling release() on the TensorImpl it points to.
86// The existing constructors, operator overloads, etc. take care to implement the correct semantics.
87//
88// Note that Tensor can also be NULL, i.e. it is not associated with any underlying TensorImpl, and
89// special care must be taken to handle this.
90class TORCH_API Tensor: public TensorBase {
91 protected:
92 // Create a Tensor with a +0 reference count. Special care must be
93 // taken to avoid decrementing this reference count at destruction
94 // time. Intended to support MaybeOwnedTraits<Tensor>.
95 explicit Tensor(unsafe_borrow_t, const TensorBase& rhs): TensorBase(unsafe_borrow_t{}, rhs) {}
96 friend MaybeOwnedTraits<Tensor>;
97 friend OptionalTensorRef;
98
99 public:
100 Tensor() = default;
101 // This constructor should not be used by end users and is an implementation
102 // detail invoked by autogenerated code.
103 explicit Tensor(
104 c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
105 : TensorBase(std::move(tensor_impl)) {}
106 Tensor(const Tensor &tensor) = default;
107 Tensor(Tensor &&tensor) = default;
108
109 // Implicitly move-constructible from TensorBase, but must be explicit to increase refcount
110 explicit Tensor(const TensorBase &base): TensorBase(base) {}
111 /*implicit*/ Tensor(TensorBase &&base): TensorBase(std::move(base)) {}
112
113 // Creates a new wrapper from TensorImpl. Intentionally a free method because
114 // it should be used with care. Checks necessary invariants
115 static Tensor wrap_tensor_impl(
116 c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
117 return TensorBase::wrap_tensor_impl(std::move(tensor_impl));
118 }
119
120 Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
121 return TensorBase::contiguous(memory_format);
122 }
123
124 Tensor conj() const {
125 if (!this->is_complex()) {
126 return *this;
127 }
128
129 switch (this->layout()) {
130 case at::kSparse:
131 case at::kSparseCsr:
132 case at::kSparseCsc:
133 case at::kSparseBsr:
134 case at::kSparseBsc:
135 return this->conj_physical();
136 default:
137 return this->_conj();
138 }
139 }
140
141 // Aliased by Dimname overloads, so need explicit using
142 using TensorBase::size;
143 using TensorBase::sym_size;
144 using TensorBase::stride;
145
146 /// Should be used if *this can reasonably be expected to be contiguous and
147 /// performance is important.
148 /// Compared to contiguous, it saves a reference count
149 /// increment/decrement if *this is already contiguous, at the cost
150 /// in all cases of an extra pointer of stack usage, an extra branch
151 /// to access, and an extra branch at destruction time.
152 c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const &;
153
154 // Use .contiguous() instead. Trying to borrow from a prvalue Tensor
155 // will only lead to trouble and dangling references.
156 c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete;
157
158 // The following overloads are very intruiging. Consider the following
159 // program:
160 //
161 // x[1] = 3;
162 //
163 // We would expect that the first entry of x is written to 3. But how can we
164 // actually achieve this? x[1] evaluates to a tensor...
165 //
166 // The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be
167 // (profitably) assigned to in the traditional sense, so we overload
168 // assignment to mean, "Actually, copy 3 into the tensor data." This is done
169 // with an rvalue-reference ref-qualified overload (the methods with && at the
170 // end of their type.)
171 //
172 // There's one more fly in the ointment: We also want
173 //
174 // Tensor x = y;
175 //
176 // to work, and we want it NOT to copy. So we need a traditional operator=
177 // overload. But we MUST specify a mutable lvalue ref-qualifier, to
178 // disambiguate the traditional overload from the rvalue-reference
179 // ref-qualified overload. Otherwise, it will be ambiguous, because
180 // a non ref-qualified method is eligible for all situations.
181
182 // Unfortunately, we have to write these constructors out manually
183 // to work around an MSVC bug:
184 // error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &':
185 // multiple versions of a defaulted special member functions are not allowed
186 // Tensor& operator=(const Tensor&) & = default;
187 // Tensor& operator=(Tensor&&) & = default;
188
189 // Also MSVC will wrongly issue the following warning with the aforementioned fix
190 // warning C4522: 'at::Tensor': multiple assignment operators specified
191 // Let's just skip the warning.
192 //
193 // TODO: temporarily disabled
194
195 Tensor& operator=(const TensorBase& x) & {
196 impl_ = x.getIntrusivePtr();
197 return *this;
198 }
199 Tensor& operator=(TensorBase&& x) & noexcept {
200 impl_ = x.unsafeReleaseIntrusivePtr();
201 return *this;
202 }
203
204 Tensor& operator=(const Tensor &x) & {
205 return operator=(static_cast<const TensorBase&>(x));
206 }
207 Tensor& operator=(Tensor &&x) & noexcept {
208 return operator=(static_cast<TensorBase&&>(x));
209 }
210
211 Tensor& operator=(const Scalar &v) && {
212 return fill_(v);
213 }
214 Tensor& operator=(const Tensor &rhs) && {
215 return copy_(rhs);
216 }
217 Tensor& operator=(Tensor&& rhs) && {
218 return copy_(rhs);
219 }
220
221 C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
222 DeprecatedTypeProperties & type() const {
223 return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
224 dispatchKeyToBackend(legacyExtractDispatchKey(key_set())),
225 scalar_type());
226 }
227
228 Tensor toType(ScalarType t) const {
229 return to(options().dtype(t), /*non_blocking*/ false, /*copy*/ false);
230 }
231
232 // TODO: Deprecate me
233 Tensor toBackend(Backend b) const {
234 return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false);
235 }
236
237 C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")
238 bool is_variable() const noexcept {
239 return !at::impl::variable_excluded_from_dispatch();
240 }
241
242 template<typename T>
243 C10_DEPRECATED_MESSAGE("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")
244 T * data() const {
245 return data_ptr<T>();
246 }
247
248 template <typename T>
249 T item() const;
250
251 template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
252 C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
253 GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const & {
254 return generic_packed_accessor<T,N,PtrTraits,index_t>();
255 }
256 template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
257 C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
258 GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() && = delete;
259
260 Tensor operator~() const {
261 return bitwise_not();
262 }
263 Tensor operator-() const {
264 return neg();
265 }
266 Tensor& operator+=(const Tensor & other) {
267 return add_(other);
268 }
269 Tensor& operator+=(const Scalar & other) {
270 return add_(other);
271 }
272 Tensor& operator-=(const Tensor & other) {
273 return sub_(other);
274 }
275 Tensor& operator-=(const Scalar & other) {
276 return sub_(other);
277 }
278 Tensor& operator*=(const Tensor & other) {
279 return mul_(other);
280 }
281 Tensor& operator*=(const Scalar & other) {
282 return mul_(other);
283 }
284 Tensor& operator/=(const Tensor & other) {
285 return div_(other);
286 }
287 Tensor& operator/=(const Scalar & other) {
288 return div_(other);
289 }
290 Tensor& operator&=(const Tensor & other) {
291 return bitwise_and_(other);
292 }
293 Tensor& operator|=(const Tensor & other) {
294 return bitwise_or_(other);
295 }
296 Tensor& operator^=(const Tensor & other) {
297 return bitwise_xor_(other);
298 }
299 Tensor operator[](const Scalar & index) const {
300 if (!index.isIntegral(false)) {
301 TORCH_CHECK_INDEX(false, "Can only index tensors with integral scalars");
302 }
303 return this->operator[](index.toLong());
304 }
305 Tensor operator[](const Tensor & index) const {
306 // These properties are checked in the Scalar constructor, but we already
307 // check them here to provide more useful diagnostics for the user.
308 if (!index.defined()) {
309 TORCH_CHECK_INDEX(false, "Can only index with tensors that are defined");
310 }
311 if (index.dim() != 0) {
312 TORCH_CHECK_INDEX(false,
313 "Can only index with tensors that are scalars (zero-dim)");
314 }
315 // The Scalar(Tensor) constructor is explicit, so we need to call it.
316 return this->operator[](index.item());
317 }
318 Tensor operator[](int64_t index) const {
319 return select(0, index);
320 }
321
322 Tensor index(ArrayRef<at::indexing::TensorIndex> indices) const;
323 Tensor index(std::initializer_list<at::indexing::TensorIndex> indices) const;
324
325 Tensor & index_put_(ArrayRef<at::indexing::TensorIndex> indices, Tensor const & rhs);
326 Tensor & index_put_(ArrayRef<at::indexing::TensorIndex> indices, const Scalar& v);
327 Tensor & index_put_(std::initializer_list<at::indexing::TensorIndex> indices, Tensor const & rhs);
328 Tensor & index_put_(std::initializer_list<at::indexing::TensorIndex> indices, const Scalar& v);
329
330 Tensor cpu() const {
331 return to(options().device(DeviceType::CPU), /*non_blocking*/ false, /*copy*/ false);
332 }
333
334 // TODO: The Python version also accepts arguments
335 Tensor cuda() const {
336 return to(options().device(DeviceType::CUDA), /*non_blocking*/ false, /*copy*/ false);
337 }
338
339 Tensor hip() const {
340 return to(options().device(DeviceType::HIP), /*non_blocking*/ false, /*copy*/ false);
341 }
342
343 Tensor ve() const {
344 return to(options().device(DeviceType::VE), /*non_blocking*/ false, /*copy*/ false);
345 }
346
347 Tensor vulkan() const {
348 return to(options().device(DeviceType::Vulkan), /*non_blocking*/ false, /*copy*/ false);
349 }
350
351 Tensor metal() const {
352 return to(options().device(DeviceType::Metal), /*non_blocking*/ false, /*copy*/ false);
353 }
354
355 Tensor meta() const {
356 return to(options().device(DeviceType::Meta), /*non_blocking*/ false, /*copy*/ false);
357 }
358
359 // ~~~~~ Autograd API ~~~~~
360
361 /// \fn bool is_leaf() const;
362 ///
363 /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention.
364 ///
365 /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were
366 /// created by the user. This means that they are not the result of an operation and so
367 /// `grad_fn()` is `nullptr`.
368 ///
369 /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`.
370 /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`.
371 ///
372 /// Example:
373 /// @code
374 /// auto a = torch::rand(10, torch::requires_grad());
375 /// std::cout << a.is_leaf() << std::endl; // prints `true`
376 ///
377 /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
378 /// std::cout << b.is_leaf() << std::endl; // prints `false`
379 /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor
380 ///
381 /// auto c = torch::rand(10, torch::requires_grad()) + 2;
382 /// std::cout << c.is_leaf() << std::endl; // prints `false`
383 /// // c was created by the addition operation
384 ///
385 /// auto d = torch::rand(10).cuda();
386 /// std::cout << d.is_leaf() << std::endl; // prints `true`
387 /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
388 ///
389 /// auto e = torch::rand(10).cuda().requires_grad_();
390 /// std::cout << e.is_leaf() << std::endl; // prints `true`
391 /// // e requires gradients and has no operations creating it
392 ///
393 /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
394 /// std::cout << f.is_leaf() << std::endl; // prints `true`
395 /// // f requires grad, has no operation creating it
396 /// @endcode
397
398 /// \fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
399 ///
400 /// Computes the gradient of current tensor with respect to graph leaves.
401 ///
402 /// The graph is differentiated using the chain rule. If the tensor is
403 /// non-scalar (i.e. its data has more than one element) and requires
404 /// gradient, the function additionally requires specifying ``gradient``.
405 /// It should be a tensor of matching type and location, that contains
406 /// the gradient of the differentiated function w.r.t. this Tensor.
407 ///
408 /// This function accumulates gradients in the leaves - you might need to
409 /// zero them before calling it.
410 ///
411 /// \param gradient Gradient w.r.t. the
412 /// tensor. If it is a tensor, it will be automatically converted
413 /// to a Tensor that does not require grad unless ``create_graph`` is True.
414 /// None values can be specified for scalar Tensors or ones that
415 /// don't require grad. If a None value would be acceptable then
416 /// this argument is optional.
417 /// \param retain_graph If ``false``, the graph used to compute
418 /// the grads will be freed. Note that in nearly all cases setting
419 /// this option to True is not needed and often can be worked around
420 /// in a much more efficient way. Defaults to the value of
421 /// ``create_graph``.
422 /// \param create_graph If ``true``, graph of the derivative will
423 /// be constructed, allowing to compute higher order derivative
424 /// products. Defaults to ``false``.
425 /// \param inputs Inputs w.r.t. which the gradient will be accumulated into
426 /// ``at::Tensor::grad``. All other Tensors will be ignored. If not
427 /// provided, the gradient is accumulated into all the leaf Tensors
428 /// that were used to compute the current tensor.
429 /// When inputs are provided and a given input is not a leaf,
430 /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients).
431 /// It is an implementation detail on which the user should not rely.
432 /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
433 void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const {
434 // NB: Adding this wrapper to _backward here because we'd like our
435 // 'backwards' api to accept the 'inputs' argument optionally. Since code gen
436 // currently does not support optional of TensorList our approach is to replace
437 // backward in native_functions.yaml with _backward and call it here instead.
438 if (inputs.has_value()) {
439 TORCH_CHECK(inputs.value().size() > 0, "'inputs' argument to backward cannot be empty")
440 this->_backward(inputs.value(), gradient, retain_graph, create_graph);
441 } else {
442 this->_backward({}, gradient, retain_graph, create_graph);
443 }
444 }
445
446 /// \fn Tensor detach() const;
447 ///
448 /// Returns a new Tensor, detached from the current graph.
449 /// The result will never require gradient.
450
451 /// \fn Tensor & detach_() const;
452 ///
453 /// Detaches the Tensor from the graph that created it, making it a leaf.
454 /// Views cannot be detached in-place.
455
456 /// \fn void retain_grad() const;
457 ///
458 /// Enables this Tensor to have their :attr:`grad` populated during
459 /// :func:`backward`. This is a no-op for leaf tensors.
460
461 /// \fn bool retains_grad() const;
462 ///
463 /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
464 /// populated during :func:`backward`, ``false`` otherwise.
465
466 const Tensor& set_requires_grad(bool requires_grad) const {
467 TensorBase::set_requires_grad(requires_grad);
468 return *this;
469 }
470
471 /// Return a mutable reference to the gradient. This is conventionally
472 /// used as `t.grad() = x` to set a gradient to a completely new tensor.
473 /// Note that this function work with a non-const Tensor and is not
474 /// thread safe.
475 Tensor& mutable_grad() const {
476 return impl_->mutable_grad();
477 }
478
479 /// This function returns an undefined tensor by default and returns a defined tensor
480 /// the first time a call to `backward()` computes gradients for this Tensor.
481 /// The attribute will then contain the gradients computed and future calls
482 /// to `backward()` will accumulate (add) gradients into it.
483 const Tensor& grad() const {
484 const Tensor& maybe_grad = impl_->grad();
485 if (!is_leaf() && !retains_grad() && !maybe_grad.defined()) {
486 TORCH_WARN(
487 "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad "
488 "attribute won't be populated during autograd.backward(). If you indeed want the .grad "
489 "field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. "
490 "If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor "
491 "instead. See github.com/pytorch/pytorch/pull/30531 for more informations.");
492 }
493 return maybe_grad;
494 }
495
496 // The Forward AD API functions below are low level and are not to be used by end
497 // users who should use the API provided in torch/csrc/autograd.h
498
499 /// This function returns the forward gradient for this Tensor at the given level.
500 const Tensor& _fw_grad(uint64_t level) const {
501 return impl_->_fw_grad(level, *this);
502 }
503
504 /// This function can be used to set the value of the forward grad.
505 /// Note that the given new_grad might not be used directly if it has different
506 /// metadata (size/stride/storage offset) compared to this Tensor. In that case,
507 /// new_grad content will be copied into a new Tensor
508 void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const {
509 impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op);
510 }
511
512
513 // STOP. Thinking of adding a method here, which only makes use
514 // of other ATen methods? Define it in native_functions.yaml.
515
516 //example
517 //Tensor * add(Tensor & b);
518 void __dispatch__backward(at::TensorList inputs, const c10::optional<at::Tensor> & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false) const;
519 void __dispatch_set_data(const at::Tensor & new_data) const;
520 at::Tensor __dispatch_data() const;
521 bool __dispatch_is_leaf() const;
522 int64_t __dispatch_output_nr() const;
523 int64_t __dispatch__version() const;
524 at::Tensor & __dispatch_requires_grad_(bool requires_grad=true) const;
525 void __dispatch_retain_grad() const;
526 bool __dispatch_retains_grad() const;
527 at::Tensor _fw_primal(int64_t level) const;
528 at::Tensor & rename_(c10::optional<at::DimnameList> names) const;
529 at::Tensor rename(c10::optional<at::DimnameList> names) const;
530 at::Tensor align_to(at::DimnameList names) const;
531 at::Tensor align_to(at::DimnameList order, int64_t ellipsis_idx) const;
532 at::Tensor align_as(const at::Tensor & other) const;
533 at::Tensor refine_names(at::DimnameList names) const;
534 at::Tensor abs() const;
535 at::Tensor & abs_() const;
536 at::Tensor absolute() const;
537 at::Tensor & absolute_() const;
538 at::Tensor angle() const;
539 at::Tensor sgn() const;
540 at::Tensor & sgn_() const;
541 at::Tensor chalf(c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
542 at::Tensor _conj() const;
543 at::Tensor __dispatch_conj() const;
544 at::Tensor _conj_physical() const;
545 at::Tensor conj_physical() const;
546 at::Tensor & conj_physical_() const;
547 at::Tensor resolve_conj() const;
548 at::Tensor resolve_neg() const;
549 at::Tensor _neg_view() const;
550 at::Tensor acos() const;
551 at::Tensor & acos_() const;
552 at::Tensor arccos() const;
553 at::Tensor & arccos_() const;
554 at::Tensor add(const at::Tensor & other, const at::Scalar & alpha=1) const;
555 at::Tensor & add_(const at::Tensor & other, const at::Scalar & alpha=1) const;
556 at::Tensor add(const at::Scalar & other, const at::Scalar & alpha=1) const;
557 at::Tensor & add_(const at::Scalar & other, const at::Scalar & alpha=1) const;
558 at::Tensor addmv(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
559 at::Tensor & addmv_(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
560 at::Tensor addr(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
561 at::Tensor & addr_(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
562 at::Tensor _is_all_true() const;
563 at::Tensor _is_any_true() const;
564 at::Tensor all(int64_t dim, bool keepdim=false) const;
565 at::Tensor all(at::Dimname dim, bool keepdim=false) const;
566 bool allclose(const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const;
567 at::Tensor any(int64_t dim, bool keepdim=false) const;
568 at::Tensor any(at::Dimname dim, bool keepdim=false) const;
569 at::Tensor argmax(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
570 at::Tensor argmin(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
571 at::Tensor acosh() const;
572 at::Tensor & acosh_() const;
573 at::Tensor arccosh() const;
574 at::Tensor & arccosh_() const;
575 at::Tensor asinh() const;
576 at::Tensor & asinh_() const;
577 at::Tensor arcsinh() const;
578 at::Tensor & arcsinh_() const;
579 at::Tensor atanh() const;
580 at::Tensor & atanh_() const;
581 at::Tensor arctanh() const;
582 at::Tensor & arctanh_() const;
583 at::Tensor as_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
584 at::Tensor as_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) const;
585 const at::Tensor & as_strided_(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
586 const at::Tensor & as_strided__symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) const;
587 at::Tensor asin() const;
588 at::Tensor & asin_() const;
589 at::Tensor arcsin() const;
590 at::Tensor & arcsin_() const;
591 at::Tensor atan() const;
592 at::Tensor & atan_() const;
593 at::Tensor arctan() const;
594 at::Tensor & arctan_() const;
595 at::Tensor baddbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
596 at::Tensor & baddbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
597 at::Tensor bernoulli(c10::optional<at::Generator> generator=c10::nullopt) const;
598 at::Tensor & bernoulli_(const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt) const;
599 at::Tensor & bernoulli_(double p=0.5, c10::optional<at::Generator> generator=c10::nullopt) const;
600 at::Tensor bernoulli(double p, c10::optional<at::Generator> generator=c10::nullopt) const;
601 at::Tensor bincount(const c10::optional<at::Tensor> & weights={}, int64_t minlength=0) const;
602 at::Tensor bitwise_not() const;
603 at::Tensor & bitwise_not_() const;
604 at::Tensor copysign(const at::Tensor & other) const;
605 at::Tensor & copysign_(const at::Tensor & other) const;
606 at::Tensor copysign(const at::Scalar & other) const;
607 at::Tensor & copysign_(const at::Scalar & other) const;
608 at::Tensor logical_not() const;
609 at::Tensor & logical_not_() const;
610 at::Tensor logical_xor(const at::Tensor & other) const;
611 at::Tensor & logical_xor_(const at::Tensor & other) const;
612 at::Tensor logical_and(const at::Tensor & other) const;
613 at::Tensor & logical_and_(const at::Tensor & other) const;
614 at::Tensor logical_or(const at::Tensor & other) const;
615 at::Tensor & logical_or_(const at::Tensor & other) const;
616 at::Tensor bmm(const at::Tensor & mat2) const;
617 at::Tensor broadcast_to(at::IntArrayRef size) const;
618 at::Tensor broadcast_to_symint(c10::SymIntArrayRef size) const;
619 at::Tensor ceil() const;
620 at::Tensor & ceil_() const;
621 ::std::vector<at::Tensor> unsafe_chunk(int64_t chunks, int64_t dim=0) const;
622 ::std::vector<at::Tensor> chunk(int64_t chunks, int64_t dim=0) const;
623 ::std::vector<at::Tensor> tensor_split(int64_t sections, int64_t dim=0) const;
624 ::std::vector<at::Tensor> tensor_split_symint(c10::SymInt sections, int64_t dim=0) const;
625 ::std::vector<at::Tensor> tensor_split(at::IntArrayRef indices, int64_t dim=0) const;
626 ::std::vector<at::Tensor> tensor_split_symint(c10::SymIntArrayRef indices, int64_t dim=0) const;
627 ::std::vector<at::Tensor> tensor_split(const at::Tensor & tensor_indices_or_sections, int64_t dim=0) const;
628 at::Tensor clamp(const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) const;
629 at::Tensor clamp(const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) const;
630 at::Tensor & clamp_(const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) const;
631 at::Tensor & clamp_(const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) const;
632 at::Tensor clamp_max(const at::Scalar & max) const;
633 at::Tensor clamp_max(const at::Tensor & max) const;
634 at::Tensor & clamp_max_(const at::Scalar & max) const;
635 at::Tensor & clamp_max_(const at::Tensor & max) const;
636 at::Tensor clamp_min(const at::Scalar & min) const;
637 at::Tensor clamp_min(const at::Tensor & min) const;
638 at::Tensor & clamp_min_(const at::Scalar & min) const;
639 at::Tensor & clamp_min_(const at::Tensor & min) const;
640 at::Tensor clip(const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) const;
641 at::Tensor clip(const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) const;
642 at::Tensor & clip_(const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) const;
643 at::Tensor & clip_(const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) const;
644 at::Tensor __dispatch_contiguous(at::MemoryFormat memory_format=MemoryFormat::Contiguous) const;
645 at::Tensor & copy_(const at::Tensor & src, bool non_blocking=false) const;
646 at::Tensor cos() const;
647 at::Tensor & cos_() const;
648 at::Tensor cosh() const;
649 at::Tensor & cosh_() const;
650 at::Tensor count_nonzero(at::IntArrayRef dim) const;
651 at::Tensor count_nonzero(c10::optional<int64_t> dim=c10::nullopt) const;
652 at::Tensor cov(int64_t correction=1, const c10::optional<at::Tensor> & fweights={}, const c10::optional<at::Tensor> & aweights={}) const;
653 at::Tensor corrcoef() const;
654 ::std::tuple<at::Tensor,at::Tensor> cummax(int64_t dim) const;
655 ::std::tuple<at::Tensor,at::Tensor> cummax(at::Dimname dim) const;
656 ::std::tuple<at::Tensor,at::Tensor> cummin(int64_t dim) const;
657 ::std::tuple<at::Tensor,at::Tensor> cummin(at::Dimname dim) const;
658 at::Tensor cumprod(int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
659 at::Tensor & cumprod_(int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
660 at::Tensor cumprod(at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
661 at::Tensor & cumprod_(at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
662 at::Tensor cumsum(int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
663 at::Tensor & cumsum_(int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
664 at::Tensor cumsum(at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
665 at::Tensor & cumsum_(at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
666 at::Tensor diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) const;
667 at::Tensor diagflat(int64_t offset=0) const;
668 at::Tensor diagonal(int64_t offset=0, int64_t dim1=0, int64_t dim2=1) const;
669 at::Tensor diagonal(at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0) const;
670 at::Tensor & fill_diagonal_(const at::Scalar & fill_value, bool wrap=false) const;
671 at::Tensor diff(int64_t n=1, int64_t dim=-1, const c10::optional<at::Tensor> & prepend={}, const c10::optional<at::Tensor> & append={}) const;
672 at::Tensor div(const at::Tensor & other) const;
673 at::Tensor & div_(const at::Tensor & other) const;
674 at::Tensor div(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const;
675 at::Tensor & div_(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const;
676 at::Tensor div(const at::Scalar & other) const;
677 at::Tensor & div_(const at::Scalar & other) const;
678 at::Tensor div(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const;
679 at::Tensor & div_(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const;
680 at::Tensor divide(const at::Tensor & other) const;
681 at::Tensor & divide_(const at::Tensor & other) const;
682 at::Tensor divide(const at::Scalar & other) const;
683 at::Tensor & divide_(const at::Scalar & other) const;
684 at::Tensor divide(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const;
685 at::Tensor & divide_(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const;
686 at::Tensor divide(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const;
687 at::Tensor & divide_(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const;
688 at::Tensor true_divide(const at::Tensor & other) const;
689 at::Tensor & true_divide_(const at::Tensor & other) const;
690 at::Tensor true_divide(const at::Scalar & other) const;
691 at::Tensor & true_divide_(const at::Scalar & other) const;
692 at::Tensor dot(const at::Tensor & tensor) const;
693 at::Tensor vdot(const at::Tensor & other) const;
694 at::Tensor new_empty(at::IntArrayRef size, at::TensorOptions options={}) const;
695 at::Tensor new_empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
696 at::Tensor new_empty_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const;
697 at::Tensor new_empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
698 at::Tensor new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) const;
699 at::Tensor new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
700 at::Tensor new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) const;
701 at::Tensor new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
702 at::Tensor new_full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) const;
703 at::Tensor new_full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
704 at::Tensor new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) const;
705 at::Tensor new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
706 at::Tensor new_zeros(at::IntArrayRef size, at::TensorOptions options={}) const;
707 at::Tensor new_zeros(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
708 at::Tensor new_zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const;
709 at::Tensor new_zeros_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
710 at::Tensor new_ones(at::IntArrayRef size, at::TensorOptions options={}) const;
711 at::Tensor new_ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
712 at::Tensor new_ones_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const;
713 at::Tensor new_ones_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const;
714 const at::Tensor & resize_(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
715 const at::Tensor & resize__symint(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
716 at::Tensor erf() const;
717 at::Tensor & erf_() const;
718 at::Tensor erfc() const;
719 at::Tensor & erfc_() const;
720 at::Tensor exp() const;
721 at::Tensor & exp_() const;
722 at::Tensor exp2() const;
723 at::Tensor & exp2_() const;
724 at::Tensor expm1() const;
725 at::Tensor & expm1_() const;
726 at::Tensor expand(at::IntArrayRef size, bool implicit=false) const;
727 at::Tensor expand_symint(c10::SymIntArrayRef size, bool implicit=false) const;
728 at::Tensor expand_as(const at::Tensor & other) const;
729 at::Tensor flatten(int64_t start_dim=0, int64_t end_dim=-1) const;
730 at::Tensor flatten(int64_t start_dim, int64_t end_dim, at::Dimname out_dim) const;
731 at::Tensor flatten(at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) const;
732 at::Tensor flatten(at::DimnameList dims, at::Dimname out_dim) const;
733 at::Tensor unflatten(int64_t dim, at::IntArrayRef sizes) const;
734 at::Tensor unflatten(at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) const;
735 at::Tensor & fill_(const at::Scalar & value) const;
736 at::Tensor & fill_(const at::Tensor & value) const;
737 at::Tensor floor() const;
738 at::Tensor & floor_() const;
739 at::Tensor floor_divide(const at::Tensor & other) const;
740 at::Tensor & floor_divide_(const at::Tensor & other) const;
741 at::Tensor floor_divide(const at::Scalar & other) const;
742 at::Tensor & floor_divide_(const at::Scalar & other) const;
743 at::Tensor frac() const;
744 at::Tensor & frac_() const;
745 at::Tensor gcd(const at::Tensor & other) const;
746 at::Tensor & gcd_(const at::Tensor & other) const;
747 at::Tensor lcm(const at::Tensor & other) const;
748 at::Tensor & lcm_(const at::Tensor & other) const;
749 at::Tensor index(const c10::List<c10::optional<at::Tensor>> & indices) const;
750 at::Tensor & index_copy_(int64_t dim, const at::Tensor & index, const at::Tensor & source) const;
751 at::Tensor index_copy(int64_t dim, const at::Tensor & index, const at::Tensor & source) const;
752 at::Tensor & index_copy_(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const;
753 at::Tensor index_copy(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const;
754 at::Tensor & index_put_(const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) const;
755 at::Tensor index_put(const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) const;
756 at::Tensor isclose(const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const;
757 at::Tensor isnan() const;
758 bool is_distributed() const;
759 bool __dispatch_is_floating_point() const;
760 bool __dispatch_is_complex() const;
761 bool __dispatch_is_conj() const;
762 bool __dispatch__is_zerotensor() const;
763 bool __dispatch_is_neg() const;
764 at::Tensor isreal() const;
765 bool is_nonzero() const;
766 bool is_same_size(const at::Tensor & other) const;
767 bool __dispatch_is_signed() const;
768 bool __dispatch_is_inference() const;
769 at::Tensor kron(const at::Tensor & other) const;
770 ::std::tuple<at::Tensor,at::Tensor> kthvalue(int64_t k, int64_t dim=-1, bool keepdim=false) const;
771 ::std::tuple<at::Tensor,at::Tensor> kthvalue(int64_t k, at::Dimname dim, bool keepdim=false) const;
772 at::Tensor nan_to_num(c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt) const;
773 at::Tensor & nan_to_num_(c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt) const;
774 at::Tensor ldexp(const at::Tensor & other) const;
775 at::Tensor & ldexp_(const at::Tensor & other) const;
776 at::Tensor log() const;
777 at::Tensor & log_() const;
778 at::Tensor log10() const;
779 at::Tensor & log10_() const;
780 at::Tensor log1p() const;
781 at::Tensor & log1p_() const;
782 at::Tensor log2() const;
783 at::Tensor & log2_() const;
784 at::Tensor logaddexp(const at::Tensor & other) const;
785 at::Tensor logaddexp2(const at::Tensor & other) const;
786 at::Tensor xlogy(const at::Tensor & other) const;
787 at::Tensor xlogy(const at::Scalar & other) const;
788 at::Tensor & xlogy_(const at::Tensor & other) const;
789 at::Tensor & xlogy_(const at::Scalar & other) const;
790 at::Tensor log_softmax(int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
791 at::Tensor log_softmax(at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
792 at::Tensor logcumsumexp(int64_t dim) const;
793 at::Tensor logcumsumexp(at::Dimname dim) const;
794 at::Tensor logsumexp(at::IntArrayRef dim, bool keepdim=false) const;
795 at::Tensor logsumexp(at::DimnameList dim, bool keepdim=false) const;
796 at::Tensor matmul(const at::Tensor & other) const;
797 at::Tensor matrix_power(int64_t n) const;
798 at::Tensor matrix_exp() const;
799 ::std::tuple<at::Tensor,at::Tensor> aminmax(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
800 ::std::tuple<at::Tensor,at::Tensor> max(int64_t dim, bool keepdim=false) const;
801 ::std::tuple<at::Tensor,at::Tensor> max(at::Dimname dim, bool keepdim=false) const;
802 at::Tensor amax(at::IntArrayRef dim={}, bool keepdim=false) const;
803 at::Tensor mean(c10::optional<at::ScalarType> dtype=c10::nullopt) const;
804 at::Tensor mean(at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
805 at::Tensor mean(at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
806 at::Tensor nanmean(at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
807 at::Tensor median() const;
808 ::std::tuple<at::Tensor,at::Tensor> median(int64_t dim, bool keepdim=false) const;
809 ::std::tuple<at::Tensor,at::Tensor> median(at::Dimname dim, bool keepdim=false) const;
810 at::Tensor nanmedian() const;
811 ::std::tuple<at::Tensor,at::Tensor> nanmedian(int64_t dim, bool keepdim=false) const;
812 ::std::tuple<at::Tensor,at::Tensor> nanmedian(at::Dimname dim, bool keepdim=false) const;
813 ::std::tuple<at::Tensor,at::Tensor> min(int64_t dim, bool keepdim=false) const;
814 ::std::tuple<at::Tensor,at::Tensor> min(at::Dimname dim, bool keepdim=false) const;
815 at::Tensor amin(at::IntArrayRef dim={}, bool keepdim=false) const;
816 at::Tensor mm(const at::Tensor & mat2) const;
817 ::std::tuple<at::Tensor,at::Tensor> mode(int64_t dim=-1, bool keepdim=false) const;
818 ::std::tuple<at::Tensor,at::Tensor> mode(at::Dimname dim, bool keepdim=false) const;
819 at::Tensor mul(const at::Tensor & other) const;
820 at::Tensor & mul_(const at::Tensor & other) const;
821 at::Tensor mul(const at::Scalar & other) const;
822 at::Tensor & mul_(const at::Scalar & other) const;
823 at::Tensor multiply(const at::Tensor & other) const;
824 at::Tensor & multiply_(const at::Tensor & other) const;
825 at::Tensor multiply(const at::Scalar & other) const;
826 at::Tensor & multiply_(const at::Scalar & other) const;
827 at::Tensor mv(const at::Tensor & vec) const;
828 at::Tensor mvlgamma(int64_t p) const;
829 at::Tensor & mvlgamma_(int64_t p) const;
830 at::Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const;
831 at::Tensor narrow_copy_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const;
832 at::Tensor narrow(int64_t dim, int64_t start, int64_t length) const;
833 at::Tensor narrow_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const;
834 at::Tensor narrow(int64_t dim, const at::Tensor & start, int64_t length) const;
835 at::Tensor narrow_symint(int64_t dim, const at::Tensor & start, c10::SymInt length) const;
836 at::Tensor permute(at::IntArrayRef dims) const;
837 at::Tensor movedim(at::IntArrayRef source, at::IntArrayRef destination) const;
838 at::Tensor movedim(int64_t source, int64_t destination) const;
839 at::Tensor moveaxis(at::IntArrayRef source, at::IntArrayRef destination) const;
840 at::Tensor moveaxis(int64_t source, int64_t destination) const;
841 at::Tensor numpy_T() const;
842 at::Tensor matrix_H() const;
843 at::Tensor mT() const;
844 at::Tensor mH() const;
845 at::Tensor adjoint() const;
846 bool is_pinned(c10::optional<at::Device> device=c10::nullopt) const;
847 at::Tensor pin_memory(c10::optional<at::Device> device=c10::nullopt) const;
848 at::Tensor pinverse(double rcond=1e-15) const;
849 at::Tensor rad2deg() const;
850 at::Tensor & rad2deg_() const;
851 at::Tensor deg2rad() const;
852 at::Tensor & deg2rad_() const;
853 at::Tensor ravel() const;
854 at::Tensor reciprocal() const;
855 at::Tensor & reciprocal_() const;
856 at::Tensor neg() const;
857 at::Tensor & neg_() const;
858 at::Tensor negative() const;
859 at::Tensor & negative_() const;
860 at::Tensor repeat(at::IntArrayRef repeats) const;
861 at::Tensor repeat_symint(c10::SymIntArrayRef repeats) const;
862 at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) const;
863 at::Tensor repeat_interleave(int64_t repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) const;
864 at::Tensor repeat_interleave_symint(c10::SymInt repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) const;
865 at::Tensor reshape(at::IntArrayRef shape) const;
866 at::Tensor reshape_symint(c10::SymIntArrayRef shape) const;
867 at::Tensor _reshape_alias(at::IntArrayRef size, at::IntArrayRef stride) const;
868 at::Tensor _reshape_alias_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const;
869 at::Tensor reshape_as(const at::Tensor & other) const;
870 at::Tensor round() const;
871 at::Tensor & round_() const;
872 at::Tensor round(int64_t decimals) const;
873 at::Tensor & round_(int64_t decimals) const;
874 at::Tensor relu() const;
875 at::Tensor & relu_() const;
876 at::Tensor prelu(const at::Tensor & weight) const;
877 at::Tensor hardshrink(const at::Scalar & lambd=0.5) const;
878 at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Scalar & lambd) const;
879 at::Tensor rsqrt() const;
880 at::Tensor & rsqrt_() const;
881 at::Tensor select(at::Dimname dim, int64_t index) const;
882 at::Tensor select(int64_t dim, int64_t index) const;
883 at::Tensor select_symint(int64_t dim, c10::SymInt index) const;
884 at::Tensor sigmoid() const;
885 at::Tensor & sigmoid_() const;
886 at::Tensor logit(c10::optional<double> eps=c10::nullopt) const;
887 at::Tensor & logit_(c10::optional<double> eps=c10::nullopt) const;
888 at::Tensor sin() const;
889 at::Tensor & sin_() const;
890 at::Tensor sinc() const;
891 at::Tensor & sinc_() const;
892 at::Tensor sinh() const;
893 at::Tensor & sinh_() const;
894 at::Tensor detach() const;
895 at::Tensor & detach_() const;
896 int64_t size(at::Dimname dim) const;
897 at::Tensor slice(int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) const;
898 at::Tensor slice_symint(int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) const;
899 at::Tensor slice_scatter(const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) const;
900 at::Tensor slice_scatter_symint(const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) const;
901 at::Tensor select_scatter(const at::Tensor & src, int64_t dim, int64_t index) const;
902 at::Tensor select_scatter_symint(const at::Tensor & src, int64_t dim, c10::SymInt index) const;
903 at::Tensor diagonal_scatter(const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) const;
904 at::Tensor as_strided_scatter(const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
905 at::Tensor as_strided_scatter_symint(const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) const;
906 at::Tensor smm(const at::Tensor & mat2) const;
907 at::Tensor softmax(int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
908 at::Tensor softmax(at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
909 ::std::vector<at::Tensor> unsafe_split(int64_t split_size, int64_t dim=0) const;
910 ::std::vector<at::Tensor> unsafe_split_symint(c10::SymInt split_size, int64_t dim=0) const;
911 ::std::vector<at::Tensor> split(int64_t split_size, int64_t dim=0) const;
912 ::std::vector<at::Tensor> split_symint(c10::SymInt split_size, int64_t dim=0) const;
913 ::std::vector<at::Tensor> split(at::IntArrayRef split_size, int64_t dim=0) const;
914 ::std::vector<at::Tensor> split_symint(c10::SymIntArrayRef split_size, int64_t dim=0) const;
915 ::std::vector<at::Tensor> unsafe_split_with_sizes(at::IntArrayRef split_sizes, int64_t dim=0) const;
916 ::std::vector<at::Tensor> unsafe_split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim=0) const;
917 ::std::vector<at::Tensor> split_with_sizes(at::IntArrayRef split_sizes, int64_t dim=0) const;
918 ::std::vector<at::Tensor> split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim=0) const;
919 ::std::vector<at::Tensor> hsplit(int64_t sections) const;
920 ::std::vector<at::Tensor> hsplit(at::IntArrayRef indices) const;
921 ::std::vector<at::Tensor> vsplit(int64_t sections) const;
922 ::std::vector<at::Tensor> vsplit(at::IntArrayRef indices) const;
923 ::std::vector<at::Tensor> dsplit(int64_t sections) const;
924 ::std::vector<at::Tensor> dsplit(at::IntArrayRef indices) const;
925 at::Tensor squeeze() const;
926 at::Tensor squeeze(int64_t dim) const;
927 at::Tensor squeeze(at::Dimname dim) const;
928 at::Tensor squeeze(at::IntArrayRef dim) const;
929 at::Tensor & squeeze_() const;
930 at::Tensor & squeeze_(int64_t dim) const;
931 at::Tensor & squeeze_(at::IntArrayRef dim) const;
932 at::Tensor & squeeze_(at::Dimname dim) const;
933 at::Tensor sspaddmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
934 at::Tensor stft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided=c10::nullopt, c10::optional<bool> return_complex=c10::nullopt) const;
935 at::Tensor stft(int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const c10::optional<at::Tensor> & window={}, bool center=true, c10::string_view pad_mode="reflect", bool normalized=false, c10::optional<bool> onesided=c10::nullopt, c10::optional<bool> return_complex=c10::nullopt) const;
936 at::Tensor istft(int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const c10::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, c10::optional<bool> onesided=c10::nullopt, c10::optional<int64_t> length=c10::nullopt, bool return_complex=false) const;
937 int64_t stride(at::Dimname dim) const;
938 at::Tensor sum(c10::optional<at::ScalarType> dtype=c10::nullopt) const;
939 at::Tensor sum(at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
940 at::Tensor sum(at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
941 at::Tensor nansum(at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
942 at::Tensor sum_to_size(at::IntArrayRef size) const;
943 at::Tensor sqrt() const;
944 at::Tensor & sqrt_() const;
945 at::Tensor square() const;
946 at::Tensor & square_() const;
947 at::Tensor std(bool unbiased) const;
948 at::Tensor std(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) const;
949 at::Tensor std(at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) const;
950 at::Tensor std(at::DimnameList dim, bool unbiased, bool keepdim=false) const;
951 at::Tensor std(at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) const;
952 at::Tensor prod(c10::optional<at::ScalarType> dtype=c10::nullopt) const;
953 at::Tensor prod(int64_t dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
954 at::Tensor prod(at::Dimname dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) const;
955 at::Tensor t() const;
956 at::Tensor & t_() const;
957 at::Tensor tan() const;
958 at::Tensor & tan_() const;
959 at::Tensor tanh() const;
960 at::Tensor & tanh_() const;
961 at::Tensor tile(at::IntArrayRef dims) const;
962 at::Tensor transpose(int64_t dim0, int64_t dim1) const;
963 at::Tensor transpose(at::Dimname dim0, at::Dimname dim1) const;
964 at::Tensor & transpose_(int64_t dim0, int64_t dim1) const;
965 at::Tensor flip(at::IntArrayRef dims) const;
966 at::Tensor fliplr() const;
967 at::Tensor flipud() const;
968 at::Tensor roll(at::IntArrayRef shifts, at::IntArrayRef dims={}) const;
969 at::Tensor rot90(int64_t k=1, at::IntArrayRef dims={0,1}) const;
970 at::Tensor _nested_tensor_size() const;
971 at::Tensor _nested_tensor_strides() const;
972 ::std::vector<int64_t> _nested_tensor_offsets() const;
973 at::Tensor trunc() const;
974 at::Tensor & trunc_() const;
975 at::Tensor fix() const;
976 at::Tensor & fix_() const;
977 at::Tensor type_as(const at::Tensor & other) const;
978 at::Tensor unsqueeze(int64_t dim) const;
979 at::Tensor & unsqueeze_(int64_t dim) const;
980 at::Tensor var(bool unbiased) const;
981 at::Tensor var(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) const;
982 at::Tensor var(at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) const;
983 at::Tensor var(at::DimnameList dim, bool unbiased, bool keepdim=false) const;
984 at::Tensor var(at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) const;
985 at::Tensor view_as(const at::Tensor & other) const;
986 at::Tensor where(const at::Tensor & condition, const at::Tensor & other) const;
987 at::Tensor where(const at::Tensor & condition, const at::Scalar & other) const;
988 at::Tensor norm(const c10::optional<at::Scalar> & p, at::ScalarType dtype) const;
989 at::Tensor norm(const at::Scalar & p=2) const;
990 at::Tensor norm(const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) const;
991 at::Tensor norm(const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false) const;
992 at::Tensor norm(const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) const;
993 at::Tensor norm(const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false) const;
994 ::std::tuple<at::Tensor,at::Tensor> frexp() const;
995 at::Tensor clone(c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
996 at::Tensor positive() const;
997 const at::Tensor & resize_as_(const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
998 const at::Tensor & resize_as_sparse_(const at::Tensor & the_template) const;
999 at::Tensor & zero_() const;
1000 at::Tensor sub(const at::Tensor & other, const at::Scalar & alpha=1) const;
1001 at::Tensor & sub_(const at::Tensor & other, const at::Scalar & alpha=1) const;
1002 at::Tensor sub(const at::Scalar & other, const at::Scalar & alpha=1) const;
1003 at::Tensor & sub_(const at::Scalar & other, const at::Scalar & alpha=1) const;
1004 at::Tensor subtract(const at::Tensor & other, const at::Scalar & alpha=1) const;
1005 at::Tensor & subtract_(const at::Tensor & other, const at::Scalar & alpha=1) const;
1006 at::Tensor subtract(const at::Scalar & other, const at::Scalar & alpha=1) const;
1007 at::Tensor & subtract_(const at::Scalar & other, const at::Scalar & alpha=1) const;
1008 at::Tensor heaviside(const at::Tensor & values) const;
1009 at::Tensor & heaviside_(const at::Tensor & values) const;
1010 at::Tensor addmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
1011 at::Tensor & addmm_(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
1012 at::Tensor _addmm_activation(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) const;
1013 const at::Tensor & sparse_resize_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const;
1014 const at::Tensor & sparse_resize_and_clear_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const;
1015 at::Tensor sparse_mask(const at::Tensor & mask) const;
1016 at::Tensor to_dense(c10::optional<at::ScalarType> dtype=c10::nullopt) const;
1017 at::Tensor _to_dense(c10::optional<at::ScalarType> dtype=c10::nullopt) const;
1018 int64_t sparse_dim() const;
1019 int64_t _dimI() const;
1020 int64_t dense_dim() const;
1021 int64_t _dimV() const;
1022 int64_t _nnz() const;
1023 at::Tensor coalesce() const;
1024 bool is_coalesced() const;
1025 at::Tensor _indices() const;
1026 at::Tensor _values() const;
1027 at::Tensor & _coalesced_(bool coalesced) const;
1028 at::Tensor indices() const;
1029 at::Tensor values() const;
1030 at::Tensor crow_indices() const;
1031 at::Tensor col_indices() const;
1032 at::Tensor ccol_indices() const;
1033 at::Tensor row_indices() const;
1034 ::std::vector<at::Tensor> unbind(int64_t dim=0) const;
1035 ::std::vector<at::Tensor> unbind(at::Dimname dim) const;
1036 at::Tensor to_sparse(int64_t sparse_dim) const;
1037 at::Tensor to_sparse(c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt) const;
1038 at::Tensor to_sparse_csr(c10::optional<int64_t> dense_dim=c10::nullopt) const;
1039 at::Tensor to_sparse_csc(c10::optional<int64_t> dense_dim=c10::nullopt) const;
1040 at::Tensor to_sparse_bsr(at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt) const;
1041 at::Tensor to_sparse_bsc(at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt) const;
1042 at::Tensor to_mkldnn(c10::optional<at::ScalarType> dtype=c10::nullopt) const;
1043 at::Tensor dequantize() const;
1044 double q_scale() const;
1045 int64_t q_zero_point() const;
1046 at::Tensor q_per_channel_scales() const;
1047 at::Tensor q_per_channel_zero_points() const;
1048 int64_t q_per_channel_axis() const;
1049 at::Tensor int_repr() const;
1050 at::QScheme qscheme() const;
1051 at::Tensor _autocast_to_reduced_precision(bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) const;
1052 at::Tensor _autocast_to_full_precision(bool cuda_enabled, bool cpu_enabled) const;
1053 at::Tensor to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
1054 at::Tensor to(c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const;
1055 at::Tensor to(at::Device device, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
1056 at::Tensor to(at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
1057 at::Tensor to(const at::Tensor & other, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
1058 at::Scalar item() const;
1059 at::Tensor & set_(at::Storage source) const;
1060 at::Tensor & set_(at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) const;
1061 at::Tensor & set__symint(at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) const;
1062 at::Tensor & set_(const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) const;
1063 at::Tensor & set__symint(const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) const;
1064 at::Tensor & set_(const at::Tensor & source) const;
1065 at::Tensor & set_() const;
1066 bool is_set_to(const at::Tensor & tensor) const;
1067 at::Tensor & masked_fill_(const at::Tensor & mask, const at::Scalar & value) const;
1068 at::Tensor masked_fill(const at::Tensor & mask, const at::Scalar & value) const;
1069 at::Tensor & masked_fill_(const at::Tensor & mask, const at::Tensor & value) const;
1070 at::Tensor masked_fill(const at::Tensor & mask, const at::Tensor & value) const;
1071 at::Tensor & masked_scatter_(const at::Tensor & mask, const at::Tensor & source) const;
1072 at::Tensor masked_scatter(const at::Tensor & mask, const at::Tensor & source) const;
1073 at::Tensor view(at::IntArrayRef size) const;
1074 at::Tensor view_symint(c10::SymIntArrayRef size) const;
1075 at::Tensor view(at::ScalarType dtype) const;
1076 at::Tensor & put_(const at::Tensor & index, const at::Tensor & source, bool accumulate=false) const;
1077 at::Tensor put(const at::Tensor & index, const at::Tensor & source, bool accumulate=false) const;
1078 at::Tensor & index_add_(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const;
1079 at::Tensor index_add(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const;
1080 at::Tensor index_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const;
1081 at::Tensor & index_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) const;
1082 at::Tensor index_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) const;
1083 at::Tensor & index_fill_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const;
1084 at::Tensor index_fill(int64_t dim, const at::Tensor & index, const at::Scalar & value) const;
1085 at::Tensor & index_fill_(int64_t dim, const at::Tensor & index, const at::Tensor & value) const;
1086 at::Tensor index_fill(int64_t dim, const at::Tensor & index, const at::Tensor & value) const;
1087 at::Tensor & index_fill_(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const;
1088 at::Tensor & index_fill_(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const;
1089 at::Tensor index_fill(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const;
1090 at::Tensor index_fill(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const;
1091 at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src) const;
1092 at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const;
1093 at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value) const;
1094 at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const;
1095 at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const;
1096 at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const;
1097 at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const;
1098 at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const;
1099 at::Tensor scatter(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const;
1100 at::Tensor scatter(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const;
1101 at::Tensor scatter_add(int64_t dim, const at::Tensor & index, const at::Tensor & src) const;
1102 at::Tensor & scatter_add_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const;
1103 at::Tensor scatter_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const;
1104 at::Tensor scatter_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) const;
1105 at::Tensor & scatter_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) const;
1106 at::Tensor & eq_(const at::Scalar & other) const;
1107 at::Tensor & eq_(const at::Tensor & other) const;
1108 at::Tensor bitwise_and(const at::Scalar & other) const;
1109 at::Tensor bitwise_and(const at::Tensor & other) const;
1110 at::Tensor & bitwise_and_(const at::Scalar & other) const;
1111 at::Tensor & bitwise_and_(const at::Tensor & other) const;
1112 at::Tensor __and__(const at::Scalar & other) const;
1113 at::Tensor __and__(const at::Tensor & other) const;
1114 at::Tensor & __iand__(const at::Scalar & other) const;
1115 at::Tensor & __iand__(const at::Tensor & other) const;
1116 at::Tensor bitwise_or(const at::Scalar & other) const;
1117 at::Tensor bitwise_or(const at::Tensor & other) const;
1118 at::Tensor & bitwise_or_(const at::Scalar & other) const;
1119 at::Tensor & bitwise_or_(const at::Tensor & other) const;
1120 at::Tensor __or__(const at::Scalar & other) const;
1121 at::Tensor __or__(const at::Tensor & other) const;
1122 at::Tensor & __ior__(const at::Scalar & other) const;
1123 at::Tensor & __ior__(const at::Tensor & other) const;
1124 at::Tensor bitwise_xor(const at::Scalar & other) const;
1125 at::Tensor bitwise_xor(const at::Tensor & other) const;
1126 at::Tensor & bitwise_xor_(const at::Scalar & other) const;
1127 at::Tensor & bitwise_xor_(const at::Tensor & other) const;
1128 at::Tensor __xor__(const at::Scalar & other) const;
1129 at::Tensor __xor__(const at::Tensor & other) const;
1130 at::Tensor & __ixor__(const at::Scalar & other) const;
1131 at::Tensor & __ixor__(const at::Tensor & other) const;
1132 at::Tensor __lshift__(const at::Scalar & other) const;
1133 at::Tensor __lshift__(const at::Tensor & other) const;
1134 at::Tensor & __ilshift__(const at::Scalar & other) const;
1135 at::Tensor & __ilshift__(const at::Tensor & other) const;
1136 at::Tensor bitwise_left_shift(const at::Tensor & other) const;
1137 at::Tensor & bitwise_left_shift_(const at::Tensor & other) const;
1138 at::Tensor bitwise_left_shift(const at::Scalar & other) const;
1139 at::Tensor & bitwise_left_shift_(const at::Scalar & other) const;
1140 at::Tensor __rshift__(const at::Scalar & other) const;
1141 at::Tensor __rshift__(const at::Tensor & other) const;
1142 at::Tensor & __irshift__(const at::Scalar & other) const;
1143 at::Tensor & __irshift__(const at::Tensor & other) const;
1144 at::Tensor bitwise_right_shift(const at::Tensor & other) const;
1145 at::Tensor & bitwise_right_shift_(const at::Tensor & other) const;
1146 at::Tensor bitwise_right_shift(const at::Scalar & other) const;
1147 at::Tensor & bitwise_right_shift_(const at::Scalar & other) const;
1148 at::Tensor & tril_(int64_t diagonal=0) const;
1149 at::Tensor & triu_(int64_t diagonal=0) const;
1150 at::Tensor & digamma_() const;
1151 at::Tensor & lerp_(const at::Tensor & end, const at::Scalar & weight) const;
1152 at::Tensor & lerp_(const at::Tensor & end, const at::Tensor & weight) const;
1153 at::Tensor & addbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
1154 at::Tensor addbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const;
1155 at::Tensor & random_(int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt) const;
1156 at::Tensor & random_(int64_t to, c10::optional<at::Generator> generator=c10::nullopt) const;
1157 at::Tensor & random_(c10::optional<at::Generator> generator=c10::nullopt) const;
1158 at::Tensor & uniform_(double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt) const;
1159 at::Tensor & cauchy_(double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt) const;
1160 at::Tensor & log_normal_(double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt) const;
1161 at::Tensor & exponential_(double lambd=1, c10::optional<at::Generator> generator=c10::nullopt) const;
1162 at::Tensor & geometric_(double p, c10::optional<at::Generator> generator=c10::nullopt) const;
1163 at::Tensor diag(int64_t diagonal=0) const;
1164 at::Tensor cross(const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt) const;
1165 at::Tensor triu(int64_t diagonal=0) const;
1166 at::Tensor tril(int64_t diagonal=0) const;
1167 at::Tensor trace() const;
1168 at::Tensor ne(const at::Scalar & other) const;
1169 at::Tensor ne(const at::Tensor & other) const;
1170 at::Tensor & ne_(const at::Scalar & other) const;
1171 at::Tensor & ne_(const at::Tensor & other) const;
1172 at::Tensor not_equal(const at::Scalar & other) const;
1173 at::Tensor not_equal(const at::Tensor & other) const;
1174 at::Tensor & not_equal_(const at::Scalar & other) const;
1175 at::Tensor & not_equal_(const at::Tensor & other) const;
1176 at::Tensor eq(const at::Scalar & other) const;
1177 at::Tensor eq(const at::Tensor & other) const;
1178 at::Tensor ge(const at::Scalar & other) const;
1179 at::Tensor ge(const at::Tensor & other) const;
1180 at::Tensor & ge_(const at::Scalar & other) const;
1181 at::Tensor & ge_(const at::Tensor & other) const;
1182 at::Tensor greater_equal(const at::Scalar & other) const;
1183 at::Tensor greater_equal(const at::Tensor & other) const;
1184 at::Tensor & greater_equal_(const at::Scalar & other) const;
1185 at::Tensor & greater_equal_(const at::Tensor & other) const;
1186 at::Tensor le(const at::Scalar & other) const;
1187 at::Tensor le(const at::Tensor & other) const;
1188 at::Tensor & le_(const at::Scalar & other) const;
1189 at::Tensor & le_(const at::Tensor & other) const;
1190 at::Tensor less_equal(const at::Scalar & other) const;
1191 at::Tensor less_equal(const at::Tensor & other) const;
1192 at::Tensor & less_equal_(const at::Scalar & other) const;
1193 at::Tensor & less_equal_(const at::Tensor & other) const;
1194 at::Tensor gt(const at::Scalar & other) const;
1195 at::Tensor gt(const at::Tensor & other) const;
1196 at::Tensor & gt_(const at::Scalar & other) const;
1197 at::Tensor & gt_(const at::Tensor & other) const;
1198 at::Tensor greater(const at::Scalar & other) const;
1199 at::Tensor greater(const at::Tensor & other) const;
1200 at::Tensor & greater_(const at::Scalar & other) const;
1201 at::Tensor & greater_(const at::Tensor & other) const;
1202 at::Tensor lt(const at::Scalar & other) const;
1203 at::Tensor lt(const at::Tensor & other) const;
1204 at::Tensor & lt_(const at::Scalar & other) const;
1205 at::Tensor & lt_(const at::Tensor & other) const;
1206 at::Tensor less(const at::Scalar & other) const;
1207 at::Tensor less(const at::Tensor & other) const;
1208 at::Tensor & less_(const at::Scalar & other) const;
1209 at::Tensor & less_(const at::Tensor & other) const;
1210 at::Tensor take(const at::Tensor & index) const;
1211 at::Tensor take_along_dim(const at::Tensor & indices, c10::optional<int64_t> dim=c10::nullopt) const;
1212 at::Tensor index_select(int64_t dim, const at::Tensor & index) const;
1213 at::Tensor index_select(at::Dimname dim, const at::Tensor & index) const;
1214 at::Tensor masked_select(const at::Tensor & mask) const;
1215 at::Tensor nonzero() const;
1216 ::std::vector<at::Tensor> nonzero_numpy() const;
1217 at::Tensor argwhere() const;
1218 at::Tensor gather(int64_t dim, const at::Tensor & index, bool sparse_grad=false) const;
1219 at::Tensor gather(at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) const;
1220 at::Tensor addcmul(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const;
1221 at::Tensor & addcmul_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const;
1222 at::Tensor addcdiv(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const;
1223 at::Tensor & addcdiv_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const;
1224 ::std::tuple<at::Tensor,at::Tensor> triangular_solve(const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) const;
1225 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(bool some=true, bool compute_uv=true) const;
1226 at::Tensor swapaxes(int64_t axis0, int64_t axis1) const;
1227 at::Tensor & swapaxes_(int64_t axis0, int64_t axis1) const;
1228 at::Tensor swapdims(int64_t dim0, int64_t dim1) const;
1229 at::Tensor & swapdims_(int64_t dim0, int64_t dim1) const;
1230 at::Tensor cholesky(bool upper=false) const;
1231 at::Tensor cholesky_solve(const at::Tensor & input2, bool upper=false) const;
1232 at::Tensor cholesky_inverse(bool upper=false) const;
1233 ::std::tuple<at::Tensor,at::Tensor> qr(bool some=true) const;
1234 ::std::tuple<at::Tensor,at::Tensor> geqrf() const;
1235 at::Tensor orgqr(const at::Tensor & input2) const;
1236 at::Tensor ormqr(const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) const;
1237 at::Tensor lu_solve(const at::Tensor & LU_data, const at::Tensor & LU_pivots) const;
1238 at::Tensor multinomial(int64_t num_samples, bool replacement=false, c10::optional<at::Generator> generator=c10::nullopt) const;
1239 at::Tensor & lgamma_() const;
1240 at::Tensor lgamma() const;
1241 at::Tensor digamma() const;
1242 at::Tensor polygamma(int64_t n) const;
1243 at::Tensor & polygamma_(int64_t n) const;
1244 at::Tensor erfinv() const;
1245 at::Tensor & erfinv_() const;
1246 at::Tensor i0() const;
1247 at::Tensor & i0_() const;
1248 at::Tensor sign() const;
1249 at::Tensor & sign_() const;
1250 at::Tensor signbit() const;
1251 at::Tensor dist(const at::Tensor & other, const at::Scalar & p=2) const;
1252 at::Tensor & atan2_(const at::Tensor & other) const;
1253 at::Tensor atan2(const at::Tensor & other) const;
1254 at::Tensor arctan2(const at::Tensor & other) const;
1255 at::Tensor & arctan2_(const at::Tensor & other) const;
1256 at::Tensor lerp(const at::Tensor & end, const at::Scalar & weight) const;
1257 at::Tensor lerp(const at::Tensor & end, const at::Tensor & weight) const;
1258 at::Tensor histc(int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) const;
1259 ::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false) const;
1260 ::std::tuple<at::Tensor,at::Tensor> histogram(int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) const;
1261 at::Tensor fmod(const at::Scalar & other) const;
1262 at::Tensor & fmod_(const at::Scalar & other) const;
1263 at::Tensor fmod(const at::Tensor & other) const;
1264 at::Tensor & fmod_(const at::Tensor & other) const;
1265 at::Tensor hypot(const at::Tensor & other) const;
1266 at::Tensor & hypot_(const at::Tensor & other) const;
1267 at::Tensor igamma(const at::Tensor & other) const;
1268 at::Tensor & igamma_(const at::Tensor & other) const;
1269 at::Tensor igammac(const at::Tensor & other) const;
1270 at::Tensor & igammac_(const at::Tensor & other) const;
1271 at::Tensor nextafter(const at::Tensor & other) const;
1272 at::Tensor & nextafter_(const at::Tensor & other) const;
1273 at::Tensor remainder(const at::Scalar & other) const;
1274 at::Tensor & remainder_(const at::Scalar & other) const;
1275 at::Tensor remainder(const at::Tensor & other) const;
1276 at::Tensor & remainder_(const at::Tensor & other) const;
1277 at::Tensor min() const;
1278 at::Tensor fmin(const at::Tensor & other) const;
1279 at::Tensor max() const;
1280 at::Tensor fmax(const at::Tensor & other) const;
1281 at::Tensor maximum(const at::Tensor & other) const;
1282 at::Tensor max(const at::Tensor & other) const;
1283 at::Tensor minimum(const at::Tensor & other) const;
1284 at::Tensor min(const at::Tensor & other) const;
1285 at::Tensor quantile(const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const;
1286 at::Tensor quantile(double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const;
1287 at::Tensor nanquantile(const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const;
1288 at::Tensor nanquantile(double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const;
1289 ::std::tuple<at::Tensor,at::Tensor> sort(int64_t dim=-1, bool descending=false) const;
1290 ::std::tuple<at::Tensor,at::Tensor> sort(c10::optional<bool> stable, int64_t dim=-1, bool descending=false) const;
1291 ::std::tuple<at::Tensor,at::Tensor> sort(at::Dimname dim, bool descending=false) const;
1292 ::std::tuple<at::Tensor,at::Tensor> sort(c10::optional<bool> stable, at::Dimname dim, bool descending=false) const;
1293 at::Tensor msort() const;
1294 at::Tensor argsort(int64_t dim=-1, bool descending=false) const;
1295 at::Tensor argsort(bool stable, int64_t dim=-1, bool descending=false) const;
1296 at::Tensor argsort(at::Dimname dim, bool descending=false) const;
1297 ::std::tuple<at::Tensor,at::Tensor> topk(int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) const;
1298 at::Tensor all() const;
1299 at::Tensor any() const;
1300 at::Tensor renorm(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const;
1301 at::Tensor & renorm_(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const;
1302 at::Tensor unfold(int64_t dimension, int64_t size, int64_t step) const;
1303 bool equal(const at::Tensor & other) const;
1304 at::Tensor pow(const at::Tensor & exponent) const;
1305 at::Tensor pow(const at::Scalar & exponent) const;
1306 at::Tensor & pow_(const at::Scalar & exponent) const;
1307 at::Tensor & pow_(const at::Tensor & exponent) const;
1308 at::Tensor float_power(const at::Tensor & exponent) const;
1309 at::Tensor float_power(const at::Scalar & exponent) const;
1310 at::Tensor & float_power_(const at::Scalar & exponent) const;
1311 at::Tensor & float_power_(const at::Tensor & exponent) const;
1312 at::Tensor & normal_(double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) const;
1313 at::Tensor alias() const;
1314 at::Tensor isfinite() const;
1315 at::Tensor isinf() const;
1316 void record_stream(at::Stream s) const;
1317 at::Tensor isposinf() const;
1318 at::Tensor isneginf() const;
1319 at::Tensor det() const;
1320 ::std::tuple<at::Tensor,at::Tensor> slogdet() const;
1321 at::Tensor logdet() const;
1322 at::Tensor inverse() const;
1323 at::Tensor inner(const at::Tensor & other) const;
1324 at::Tensor outer(const at::Tensor & vec2) const;
1325 at::Tensor ger(const at::Tensor & vec2) const;
1326 at::Tensor to_padded_tensor(double padding, at::OptionalIntArrayRef output_size=c10::nullopt) const;
1327 at::Tensor to_padded_tensor_symint(double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt) const;
1328
1329 // Special C++ only overloads for std()-like functions (See gh-40287)
1330 // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
1331 // So, for example std(0) would select the std(unbiased=False) overload
1332
1333 Tensor var(int dim) const {
1334 return var(IntArrayRef{dim});
1335 }
1336
1337 Tensor std(int dim) const {
1338 return std(IntArrayRef{dim});
1339 }
1340
1341 // We changed .dtype() to return a TypeMeta in #12766. Ideally, we want the
1342 // at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet.
1343 // Before that change, we make this method to maintain BC for C++ usage like
1344 // `x.to(y.dtype)`.
1345 // TODO: remove following two after at::kDouble and its friends are TypeMeta's.
1346 inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
1347 return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
1348 }
1349 inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
1350 return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
1351 }
1352
1353 template <typename F, typename... Args>
1354 decltype(auto) m(F func, Args&&... params) const {
1355 return func(*this, std::forward<Args>(params)...);
1356 }
1357
1358 /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended
1359 /// to be used from functions that need to access the `Variable`'s equivalent `Tensor`
1360 /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`).
1361 ///
1362 /// One notable difference with the legacy `.data()` function is that changes to the
1363 /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset)
1364 /// will not update the original `Variable`, due to the fact that this function
1365 /// shallow-copies the `Variable`'s underlying TensorImpl.
1366 at::Tensor tensor_data() const {
1367 return TensorBase::tensor_data();
1368 }
1369
1370 /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data`
1371 /// in Python, which create a new `Variable` that shares the same storage and
1372 /// tensor metadata with the original `Variable`, but with a completely new
1373 /// autograd history.
1374 ///
1375 /// NOTE: If we change the tensor metadata (e.g. sizes / strides /
1376 /// storage / storage_offset) of a variable created from `var.variable_data()`, those
1377 /// changes will not update the original variable `var`. In `.variable_data()`, we set
1378 /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal,
1379 /// in order to prevent users from changing metadata of `var.variable_data()`
1380 /// and expecting the original variable `var` to also be updated.
1381 at::Tensor variable_data() const {
1382 return TensorBase::variable_data();
1383 }
1384
1385 // Hooks
1386 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1387
1388 template <typename T>
1389 using hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, Tensor>>::value, unsigned>;
1390 template <typename T>
1391 using hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>;
1392
1393 /// Registers a backward hook.
1394 ///
1395 /// The hook will be called every time a gradient with respect to the Tensor is computed.
1396 /// The hook should have one of the following signature:
1397 /// ```
1398 /// hook(Tensor grad) -> Tensor
1399 /// ```
1400 /// ```
1401 /// hook(Tensor grad) -> void
1402 /// ```
1403 /// The hook should not modify its argument, but it can optionally return a new gradient
1404 /// which will be used in place of `grad`.
1405 ///
1406 /// This function returns the index of the hook in the list which can be used to remove hook.
1407 ///
1408 /// Example:
1409 /// @code
1410 /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
1411 /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
1412 /// v.backward(torch::tensor({1., 2., 3.}));
1413 /// // This prints:
1414 /// // ```
1415 /// // 2
1416 /// // 4
1417 /// // 6
1418 /// // [ CPUFloatType{3} ]
1419 /// // ```
1420 /// std::cout << v.grad() << std::endl;
1421 /// v.remove_hook(h); // removes the hook
1422 /// @endcode
1423 template <typename T>
1424 hook_return_void_t<T> register_hook(T&& hook) const;
1425 template <typename T>
1426 hook_return_var_t<T> register_hook(T&& hook) const;
1427
1428 // Variable methods
1429 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1430
1431 Tensor data() const {
1432 return TensorBase::data();
1433 }
1434
1435 void _backward(TensorList inputs, const c10::optional<Tensor>& gradient, c10::optional<bool> keep_graph, bool create_graph) const;
1436
1437 const Tensor& requires_grad_(bool _requires_grad=true) const {
1438 TensorBase::requires_grad_(_requires_grad);
1439 return *this;
1440 }
1441};
1442
1443namespace detail {
1444// Helper creator for Tensor class which doesn't requires the users to pass
1445// in an intrusive_ptr instead it just converts the argument passed to
1446// requested intrusive_ptr type.
1447template <typename T, typename... Args>
1448Tensor make_tensor(Args&&... args) {
1449 return Tensor(c10::make_intrusive<T>(std::forward<Args>(args)...));
1450}
1451
1452} // namespace detail
1453
1454} // namespace at
1455
1456
1457namespace at {
1458
1459// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
1460inline void Tensor::__dispatch__backward(at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) const {
1461 return at::_ops::_backward::call(const_cast<Tensor&>(*this), inputs, gradient, retain_graph, create_graph);
1462}
1463
1464// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
1465inline void Tensor::__dispatch_set_data(const at::Tensor & new_data) const {
1466 return at::_ops::set_data::call(const_cast<Tensor&>(*this), new_data);
1467}
1468
1469// aten::data(Tensor self) -> Tensor
1470inline at::Tensor Tensor::__dispatch_data() const {
1471 return at::_ops::data::call(const_cast<Tensor&>(*this));
1472}
1473
1474// aten::is_leaf(Tensor self) -> bool
1475inline bool Tensor::__dispatch_is_leaf() const {
1476 return at::_ops::is_leaf::call(const_cast<Tensor&>(*this));
1477}
1478
1479// aten::output_nr(Tensor self) -> int
1480inline int64_t Tensor::__dispatch_output_nr() const {
1481 return at::_ops::output_nr::call(const_cast<Tensor&>(*this));
1482}
1483
1484// aten::_version(Tensor self) -> int
1485inline int64_t Tensor::__dispatch__version() const {
1486 return at::_ops::_version::call(const_cast<Tensor&>(*this));
1487}
1488
1489// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
1490inline at::Tensor & Tensor::__dispatch_requires_grad_(bool requires_grad) const {
1491 return at::_ops::requires_grad_::call(const_cast<Tensor&>(*this), requires_grad);
1492}
1493
1494// aten::retain_grad(Tensor(a!) self) -> ()
1495inline void Tensor::__dispatch_retain_grad() const {
1496 return at::_ops::retain_grad::call(const_cast<Tensor&>(*this));
1497}
1498
1499// aten::retains_grad(Tensor self) -> bool
1500inline bool Tensor::__dispatch_retains_grad() const {
1501 return at::_ops::retains_grad::call(const_cast<Tensor&>(*this));
1502}
1503
1504// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
1505inline at::Tensor Tensor::_fw_primal(int64_t level) const {
1506 return at::_ops::_fw_primal::call(const_cast<Tensor&>(*this), level);
1507}
1508
1509// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
1510inline at::Tensor & Tensor::rename_(c10::optional<at::DimnameList> names) const {
1511 return at::_ops::rename_::call(const_cast<Tensor&>(*this), names);
1512}
1513
1514// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
1515inline at::Tensor Tensor::rename(c10::optional<at::DimnameList> names) const {
1516 return at::_ops::rename::call(const_cast<Tensor&>(*this), names);
1517}
1518
1519// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
1520inline at::Tensor Tensor::align_to(at::DimnameList names) const {
1521 return at::_ops::align_to::call(const_cast<Tensor&>(*this), names);
1522}
1523
1524// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
1525inline at::Tensor Tensor::align_to(at::DimnameList order, int64_t ellipsis_idx) const {
1526 return at::_ops::align_to_ellipsis_idx::call(const_cast<Tensor&>(*this), order, ellipsis_idx);
1527}
1528
1529// aten::align_as(Tensor self, Tensor other) -> Tensor
1530inline at::Tensor Tensor::align_as(const at::Tensor & other) const {
1531 return at::_ops::align_as::call(const_cast<Tensor&>(*this), other);
1532}
1533
1534// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
1535inline at::Tensor Tensor::refine_names(at::DimnameList names) const {
1536 return at::_ops::refine_names::call(const_cast<Tensor&>(*this), names);
1537}
1538
1539// aten::abs(Tensor self) -> Tensor
1540inline at::Tensor Tensor::abs() const {
1541 return at::_ops::abs::call(const_cast<Tensor&>(*this));
1542}
1543
1544// aten::abs_(Tensor(a!) self) -> Tensor(a!)
1545inline at::Tensor & Tensor::abs_() const {
1546 return at::_ops::abs_::call(const_cast<Tensor&>(*this));
1547}
1548
1549// aten::absolute(Tensor self) -> Tensor
1550inline at::Tensor Tensor::absolute() const {
1551 return at::_ops::absolute::call(const_cast<Tensor&>(*this));
1552}
1553
1554// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
1555inline at::Tensor & Tensor::absolute_() const {
1556 return at::_ops::absolute_::call(const_cast<Tensor&>(*this));
1557}
1558
1559// aten::angle(Tensor self) -> Tensor
1560inline at::Tensor Tensor::angle() const {
1561 return at::_ops::angle::call(const_cast<Tensor&>(*this));
1562}
1563
1564// aten::sgn(Tensor self) -> Tensor
1565inline at::Tensor Tensor::sgn() const {
1566 return at::_ops::sgn::call(const_cast<Tensor&>(*this));
1567}
1568
1569// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
1570inline at::Tensor & Tensor::sgn_() const {
1571 return at::_ops::sgn_::call(const_cast<Tensor&>(*this));
1572}
1573
1574// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
1575inline at::Tensor Tensor::chalf(c10::optional<at::MemoryFormat> memory_format) const {
1576 return at::_ops::chalf::call(const_cast<Tensor&>(*this), memory_format);
1577}
1578
1579// aten::_conj(Tensor(a) self) -> Tensor(a)
1580inline at::Tensor Tensor::_conj() const {
1581 return at::_ops::_conj::call(const_cast<Tensor&>(*this));
1582}
1583
1584// aten::conj(Tensor(a) self) -> Tensor(a)
1585inline at::Tensor Tensor::__dispatch_conj() const {
1586 return at::_ops::conj::call(const_cast<Tensor&>(*this));
1587}
1588
1589// aten::_conj_physical(Tensor self) -> Tensor
1590inline at::Tensor Tensor::_conj_physical() const {
1591 return at::_ops::_conj_physical::call(const_cast<Tensor&>(*this));
1592}
1593
1594// aten::conj_physical(Tensor self) -> Tensor
1595inline at::Tensor Tensor::conj_physical() const {
1596 return at::_ops::conj_physical::call(const_cast<Tensor&>(*this));
1597}
1598
1599// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
1600inline at::Tensor & Tensor::conj_physical_() const {
1601 return at::_ops::conj_physical_::call(const_cast<Tensor&>(*this));
1602}
1603
1604// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
1605inline at::Tensor Tensor::resolve_conj() const {
1606 return at::_ops::resolve_conj::call(const_cast<Tensor&>(*this));
1607}
1608
1609// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
1610inline at::Tensor Tensor::resolve_neg() const {
1611 return at::_ops::resolve_neg::call(const_cast<Tensor&>(*this));
1612}
1613
1614// aten::_neg_view(Tensor(a) self) -> Tensor(a)
1615inline at::Tensor Tensor::_neg_view() const {
1616 return at::_ops::_neg_view::call(const_cast<Tensor&>(*this));
1617}
1618
1619// aten::acos(Tensor self) -> Tensor
1620inline at::Tensor Tensor::acos() const {
1621 return at::_ops::acos::call(const_cast<Tensor&>(*this));
1622}
1623
1624// aten::acos_(Tensor(a!) self) -> Tensor(a!)
1625inline at::Tensor & Tensor::acos_() const {
1626 return at::_ops::acos_::call(const_cast<Tensor&>(*this));
1627}
1628
1629// aten::arccos(Tensor self) -> Tensor
1630inline at::Tensor Tensor::arccos() const {
1631 return at::_ops::arccos::call(const_cast<Tensor&>(*this));
1632}
1633
1634// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
1635inline at::Tensor & Tensor::arccos_() const {
1636 return at::_ops::arccos_::call(const_cast<Tensor&>(*this));
1637}
1638
1639// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
1640inline at::Tensor Tensor::add(const at::Tensor & other, const at::Scalar & alpha) const {
1641 return at::_ops::add_Tensor::call(const_cast<Tensor&>(*this), other, alpha);
1642}
1643
1644// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
1645inline at::Tensor & Tensor::add_(const at::Tensor & other, const at::Scalar & alpha) const {
1646 return at::_ops::add__Tensor::call(const_cast<Tensor&>(*this), other, alpha);
1647}
1648
1649// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
1650inline at::Tensor Tensor::add(const at::Scalar & other, const at::Scalar & alpha) const {
1651 return at::_ops::add_Scalar::call(const_cast<Tensor&>(*this), other, alpha);
1652}
1653
1654// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
1655inline at::Tensor & Tensor::add_(const at::Scalar & other, const at::Scalar & alpha) const {
1656 return at::_ops::add__Scalar::call(const_cast<Tensor&>(*this), other, alpha);
1657}
1658
1659// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
1660inline at::Tensor Tensor::addmv(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) const {
1661 return at::_ops::addmv::call(const_cast<Tensor&>(*this), mat, vec, beta, alpha);
1662}
1663
1664// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
1665inline at::Tensor & Tensor::addmv_(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) const {
1666 return at::_ops::addmv_::call(const_cast<Tensor&>(*this), mat, vec, beta, alpha);
1667}
1668
1669// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
1670inline at::Tensor Tensor::addr(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) const {
1671 return at::_ops::addr::call(const_cast<Tensor&>(*this), vec1, vec2, beta, alpha);
1672}
1673
1674// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
1675inline at::Tensor & Tensor::addr_(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) const {
1676 return at::_ops::addr_::call(const_cast<Tensor&>(*this), vec1, vec2, beta, alpha);
1677}
1678
1679// aten::_is_all_true(Tensor self) -> Tensor
1680inline at::Tensor Tensor::_is_all_true() const {
1681 return at::_ops::_is_all_true::call(const_cast<Tensor&>(*this));
1682}
1683
1684// aten::_is_any_true(Tensor self) -> Tensor
1685inline at::Tensor Tensor::_is_any_true() const {
1686 return at::_ops::_is_any_true::call(const_cast<Tensor&>(*this));
1687}
1688
1689// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
1690inline at::Tensor Tensor::all(int64_t dim, bool keepdim) const {
1691 return at::_ops::all_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
1692}
1693
1694// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
1695inline at::Tensor Tensor::all(at::Dimname dim, bool keepdim) const {
1696 return at::_ops::all_dimname::call(const_cast<Tensor&>(*this), dim, keepdim);
1697}
1698
1699// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
1700inline bool Tensor::allclose(const at::Tensor & other, double rtol, double atol, bool equal_nan) const {
1701 return at::_ops::allclose::call(const_cast<Tensor&>(*this), other, rtol, atol, equal_nan);
1702}
1703
1704// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
1705inline at::Tensor Tensor::any(int64_t dim, bool keepdim) const {
1706 return at::_ops::any_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
1707}
1708
1709// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
1710inline at::Tensor Tensor::any(at::Dimname dim, bool keepdim) const {
1711 return at::_ops::any_dimname::call(const_cast<Tensor&>(*this), dim, keepdim);
1712}
1713
1714// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
1715inline at::Tensor Tensor::argmax(c10::optional<int64_t> dim, bool keepdim) const {
1716 return at::_ops::argmax::call(const_cast<Tensor&>(*this), dim, keepdim);
1717}
1718
1719// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
1720inline at::Tensor Tensor::argmin(c10::optional<int64_t> dim, bool keepdim) const {
1721 return at::_ops::argmin::call(const_cast<Tensor&>(*this), dim, keepdim);
1722}
1723
1724// aten::acosh(Tensor self) -> Tensor
1725inline at::Tensor Tensor::acosh() const {
1726 return at::_ops::acosh::call(const_cast<Tensor&>(*this));
1727}
1728
1729// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
1730inline at::Tensor & Tensor::acosh_() const {
1731 return at::_ops::acosh_::call(const_cast<Tensor&>(*this));
1732}
1733
1734// aten::arccosh(Tensor self) -> Tensor
1735inline at::Tensor Tensor::arccosh() const {
1736 return at::_ops::arccosh::call(const_cast<Tensor&>(*this));
1737}
1738
1739// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
1740inline at::Tensor & Tensor::arccosh_() const {
1741 return at::_ops::arccosh_::call(const_cast<Tensor&>(*this));
1742}
1743
1744// aten::asinh(Tensor self) -> Tensor
1745inline at::Tensor Tensor::asinh() const {
1746 return at::_ops::asinh::call(const_cast<Tensor&>(*this));
1747}
1748
1749// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
1750inline at::Tensor & Tensor::asinh_() const {
1751 return at::_ops::asinh_::call(const_cast<Tensor&>(*this));
1752}
1753
1754// aten::arcsinh(Tensor self) -> Tensor
1755inline at::Tensor Tensor::arcsinh() const {
1756 return at::_ops::arcsinh::call(const_cast<Tensor&>(*this));
1757}
1758
1759// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
1760inline at::Tensor & Tensor::arcsinh_() const {
1761 return at::_ops::arcsinh_::call(const_cast<Tensor&>(*this));
1762}
1763
1764// aten::atanh(Tensor self) -> Tensor
1765inline at::Tensor Tensor::atanh() const {
1766 return at::_ops::atanh::call(const_cast<Tensor&>(*this));
1767}
1768
1769// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
1770inline at::Tensor & Tensor::atanh_() const {
1771 return at::_ops::atanh_::call(const_cast<Tensor&>(*this));
1772}
1773
1774// aten::arctanh(Tensor self) -> Tensor
1775inline at::Tensor Tensor::arctanh() const {
1776 return at::_ops::arctanh::call(const_cast<Tensor&>(*this));
1777}
1778
1779// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
1780inline at::Tensor & Tensor::arctanh_() const {
1781 return at::_ops::arctanh_::call(const_cast<Tensor&>(*this));
1782}
1783
1784// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
1785inline at::Tensor Tensor::as_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) const {
1786 return at::_ops::as_strided::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
1787}
1788
1789// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
1790inline at::Tensor Tensor::as_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) const {
1791 return at::_ops::as_strided::call(const_cast<Tensor&>(*this), size, stride, storage_offset);
1792}
1793
1794// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
1795inline const at::Tensor & Tensor::as_strided_(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) const {
1796 return at::_ops::as_strided_::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
1797}
1798
1799// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
1800inline const at::Tensor & Tensor::as_strided__symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) const {
1801 return at::_ops::as_strided_::call(const_cast<Tensor&>(*this), size, stride, storage_offset);
1802}
1803
1804// aten::asin(Tensor self) -> Tensor
1805inline at::Tensor Tensor::asin() const {
1806 return at::_ops::asin::call(const_cast<Tensor&>(*this));
1807}
1808
1809// aten::asin_(Tensor(a!) self) -> Tensor(a!)
1810inline at::Tensor & Tensor::asin_() const {
1811 return at::_ops::asin_::call(const_cast<Tensor&>(*this));
1812}
1813
1814// aten::arcsin(Tensor self) -> Tensor
1815inline at::Tensor Tensor::arcsin() const {
1816 return at::_ops::arcsin::call(const_cast<Tensor&>(*this));
1817}
1818
1819// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
1820inline at::Tensor & Tensor::arcsin_() const {
1821 return at::_ops::arcsin_::call(const_cast<Tensor&>(*this));
1822}
1823
1824// aten::atan(Tensor self) -> Tensor
1825inline at::Tensor Tensor::atan() const {
1826 return at::_ops::atan::call(const_cast<Tensor&>(*this));
1827}
1828
1829// aten::atan_(Tensor(a!) self) -> Tensor(a!)
1830inline at::Tensor & Tensor::atan_() const {
1831 return at::_ops::atan_::call(const_cast<Tensor&>(*this));
1832}
1833
1834// aten::arctan(Tensor self) -> Tensor
1835inline at::Tensor Tensor::arctan() const {
1836 return at::_ops::arctan::call(const_cast<Tensor&>(*this));
1837}
1838
1839// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
1840inline at::Tensor & Tensor::arctan_() const {
1841 return at::_ops::arctan_::call(const_cast<Tensor&>(*this));
1842}
1843
1844// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
1845inline at::Tensor Tensor::baddbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const {
1846 return at::_ops::baddbmm::call(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
1847}
1848
1849// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
1850inline at::Tensor & Tensor::baddbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const {
1851 return at::_ops::baddbmm_::call(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
1852}
1853
1854// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
1855inline at::Tensor Tensor::bernoulli(c10::optional<at::Generator> generator) const {
1856 return at::_ops::bernoulli::call(const_cast<Tensor&>(*this), generator);
1857}
1858
1859// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
1860inline at::Tensor & Tensor::bernoulli_(const at::Tensor & p, c10::optional<at::Generator> generator) const {
1861 return at::_ops::bernoulli__Tensor::call(const_cast<Tensor&>(*this), p, generator);
1862}
1863
1864// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
1865inline at::Tensor & Tensor::bernoulli_(double p, c10::optional<at::Generator> generator) const {
1866 return at::_ops::bernoulli__float::call(const_cast<Tensor&>(*this), p, generator);
1867}
1868
1869// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
1870inline at::Tensor Tensor::bernoulli(double p, c10::optional<at::Generator> generator) const {
1871 return at::_ops::bernoulli_p::call(const_cast<Tensor&>(*this), p, generator);
1872}
1873
1874// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
1875inline at::Tensor Tensor::bincount(const c10::optional<at::Tensor> & weights, int64_t minlength) const {
1876 return at::_ops::bincount::call(const_cast<Tensor&>(*this), weights, minlength);
1877}
1878
1879// aten::bitwise_not(Tensor self) -> Tensor
1880inline at::Tensor Tensor::bitwise_not() const {
1881 return at::_ops::bitwise_not::call(const_cast<Tensor&>(*this));
1882}
1883
1884// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
1885inline at::Tensor & Tensor::bitwise_not_() const {
1886 return at::_ops::bitwise_not_::call(const_cast<Tensor&>(*this));
1887}
1888
1889// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
1890inline at::Tensor Tensor::copysign(const at::Tensor & other) const {
1891 return at::_ops::copysign_Tensor::call(const_cast<Tensor&>(*this), other);
1892}
1893
1894// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
1895inline at::Tensor & Tensor::copysign_(const at::Tensor & other) const {
1896 return at::_ops::copysign__Tensor::call(const_cast<Tensor&>(*this), other);
1897}
1898
1899// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
1900inline at::Tensor Tensor::copysign(const at::Scalar & other) const {
1901 return at::_ops::copysign_Scalar::call(const_cast<Tensor&>(*this), other);
1902}
1903
1904// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
1905inline at::Tensor & Tensor::copysign_(const at::Scalar & other) const {
1906 return at::_ops::copysign__Scalar::call(const_cast<Tensor&>(*this), other);
1907}
1908
1909// aten::logical_not(Tensor self) -> Tensor
1910inline at::Tensor Tensor::logical_not() const {
1911 return at::_ops::logical_not::call(const_cast<Tensor&>(*this));
1912}
1913
1914// aten::logical_not_(Tensor(a!) self) -> Tensor(a!)
1915inline at::Tensor & Tensor::logical_not_() const {
1916 return at::_ops::logical_not_::call(const_cast<Tensor&>(*this));
1917}
1918
1919// aten::logical_xor(Tensor self, Tensor other) -> Tensor
1920inline at::Tensor Tensor::logical_xor(const at::Tensor & other) const {
1921 return at::_ops::logical_xor::call(const_cast<Tensor&>(*this), other);
1922}
1923
1924// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
1925inline at::Tensor & Tensor::logical_xor_(const at::Tensor & other) const {
1926 return at::_ops::logical_xor_::call(const_cast<Tensor&>(*this), other);
1927}
1928
1929// aten::logical_and(Tensor self, Tensor other) -> Tensor
1930inline at::Tensor Tensor::logical_and(const at::Tensor & other) const {
1931 return at::_ops::logical_and::call(const_cast<Tensor&>(*this), other);
1932}
1933
1934// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
1935inline at::Tensor & Tensor::logical_and_(const at::Tensor & other) const {
1936 return at::_ops::logical_and_::call(const_cast<Tensor&>(*this), other);
1937}
1938
1939// aten::logical_or(Tensor self, Tensor other) -> Tensor
1940inline at::Tensor Tensor::logical_or(const at::Tensor & other) const {
1941 return at::_ops::logical_or::call(const_cast<Tensor&>(*this), other);
1942}
1943
1944// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
1945inline at::Tensor & Tensor::logical_or_(const at::Tensor & other) const {
1946 return at::_ops::logical_or_::call(const_cast<Tensor&>(*this), other);
1947}
1948
1949// aten::bmm(Tensor self, Tensor mat2) -> Tensor
1950inline at::Tensor Tensor::bmm(const at::Tensor & mat2) const {
1951 return at::_ops::bmm::call(const_cast<Tensor&>(*this), mat2);
1952}
1953
1954// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
1955inline at::Tensor Tensor::broadcast_to(at::IntArrayRef size) const {
1956 return at::_ops::broadcast_to::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size));
1957}
1958
1959// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
1960inline at::Tensor Tensor::broadcast_to_symint(c10::SymIntArrayRef size) const {
1961 return at::_ops::broadcast_to::call(const_cast<Tensor&>(*this), size);
1962}
1963
1964// aten::ceil(Tensor self) -> Tensor
1965inline at::Tensor Tensor::ceil() const {
1966 return at::_ops::ceil::call(const_cast<Tensor&>(*this));
1967}
1968
1969// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
1970inline at::Tensor & Tensor::ceil_() const {
1971 return at::_ops::ceil_::call(const_cast<Tensor&>(*this));
1972}
1973
1974// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
1975inline ::std::vector<at::Tensor> Tensor::unsafe_chunk(int64_t chunks, int64_t dim) const {
1976 return at::_ops::unsafe_chunk::call(const_cast<Tensor&>(*this), chunks, dim);
1977}
1978
1979// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
1980inline ::std::vector<at::Tensor> Tensor::chunk(int64_t chunks, int64_t dim) const {
1981 return at::_ops::chunk::call(const_cast<Tensor&>(*this), chunks, dim);
1982}
1983
1984// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
1985inline ::std::vector<at::Tensor> Tensor::tensor_split(int64_t sections, int64_t dim) const {
1986 return at::_ops::tensor_split_sections::call(const_cast<Tensor&>(*this), sections, dim);
1987}
1988
1989// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
1990inline ::std::vector<at::Tensor> Tensor::tensor_split_symint(c10::SymInt sections, int64_t dim) const {
1991 return at::_ops::tensor_split_sections::call(const_cast<Tensor&>(*this), sections, dim);
1992}
1993
1994// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
1995inline ::std::vector<at::Tensor> Tensor::tensor_split(at::IntArrayRef indices, int64_t dim) const {
1996 return at::_ops::tensor_split_indices::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(indices), dim);
1997}
1998
1999// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
2000inline ::std::vector<at::Tensor> Tensor::tensor_split_symint(c10::SymIntArrayRef indices, int64_t dim) const {
2001 return at::_ops::tensor_split_indices::call(const_cast<Tensor&>(*this), indices, dim);
2002}
2003
2004// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
2005inline ::std::vector<at::Tensor> Tensor::tensor_split(const at::Tensor & tensor_indices_or_sections, int64_t dim) const {
2006 return at::_ops::tensor_split_tensor_indices_or_sections::call(const_cast<Tensor&>(*this), tensor_indices_or_sections, dim);
2007}
2008
2009// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
2010inline at::Tensor Tensor::clamp(const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) const {
2011 return at::_ops::clamp::call(const_cast<Tensor&>(*this), min, max);
2012}
2013
2014// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
2015inline at::Tensor Tensor::clamp(const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) const {
2016 return at::_ops::clamp_Tensor::call(const_cast<Tensor&>(*this), min, max);
2017}
2018
2019// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
2020inline at::Tensor & Tensor::clamp_(const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) const {
2021 return at::_ops::clamp_::call(const_cast<Tensor&>(*this), min, max);
2022}
2023
2024// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
2025inline at::Tensor & Tensor::clamp_(const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) const {
2026 return at::_ops::clamp__Tensor::call(const_cast<Tensor&>(*this), min, max);
2027}
2028
2029// aten::clamp_max(Tensor self, Scalar max) -> Tensor
2030inline at::Tensor Tensor::clamp_max(const at::Scalar & max) const {
2031 return at::_ops::clamp_max::call(const_cast<Tensor&>(*this), max);
2032}
2033
2034// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
2035inline at::Tensor Tensor::clamp_max(const at::Tensor & max) const {
2036 return at::_ops::clamp_max_Tensor::call(const_cast<Tensor&>(*this), max);
2037}
2038
2039// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
2040inline at::Tensor & Tensor::clamp_max_(const at::Scalar & max) const {
2041 return at::_ops::clamp_max_::call(const_cast<Tensor&>(*this), max);
2042}
2043
2044// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
2045inline at::Tensor & Tensor::clamp_max_(const at::Tensor & max) const {
2046 return at::_ops::clamp_max__Tensor::call(const_cast<Tensor&>(*this), max);
2047}
2048
2049// aten::clamp_min(Tensor self, Scalar min) -> Tensor
2050inline at::Tensor Tensor::clamp_min(const at::Scalar & min) const {
2051 return at::_ops::clamp_min::call(const_cast<Tensor&>(*this), min);
2052}
2053
2054// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
2055inline at::Tensor Tensor::clamp_min(const at::Tensor & min) const {
2056 return at::_ops::clamp_min_Tensor::call(const_cast<Tensor&>(*this), min);
2057}
2058
2059// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
2060inline at::Tensor & Tensor::clamp_min_(const at::Scalar & min) const {
2061 return at::_ops::clamp_min_::call(const_cast<Tensor&>(*this), min);
2062}
2063
2064// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
2065inline at::Tensor & Tensor::clamp_min_(const at::Tensor & min) const {
2066 return at::_ops::clamp_min__Tensor::call(const_cast<Tensor&>(*this), min);
2067}
2068
2069// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
2070inline at::Tensor Tensor::clip(const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) const {
2071 return at::_ops::clip::call(const_cast<Tensor&>(*this), min, max);
2072}
2073
2074// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
2075inline at::Tensor Tensor::clip(const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) const {
2076 return at::_ops::clip_Tensor::call(const_cast<Tensor&>(*this), min, max);
2077}
2078
2079// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
2080inline at::Tensor & Tensor::clip_(const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) const {
2081 return at::_ops::clip_::call(const_cast<Tensor&>(*this), min, max);
2082}
2083
2084// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
2085inline at::Tensor & Tensor::clip_(const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) const {
2086 return at::_ops::clip__Tensor::call(const_cast<Tensor&>(*this), min, max);
2087}
2088
2089// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
2090inline at::Tensor Tensor::__dispatch_contiguous(at::MemoryFormat memory_format) const {
2091 return at::_ops::contiguous::call(const_cast<Tensor&>(*this), memory_format);
2092}
2093
2094// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
2095inline at::Tensor & Tensor::copy_(const at::Tensor & src, bool non_blocking) const {
2096 return at::_ops::copy_::call(const_cast<Tensor&>(*this), src, non_blocking);
2097}
2098
2099// aten::cos(Tensor self) -> Tensor
2100inline at::Tensor Tensor::cos() const {
2101 return at::_ops::cos::call(const_cast<Tensor&>(*this));
2102}
2103
2104// aten::cos_(Tensor(a!) self) -> Tensor(a!)
2105inline at::Tensor & Tensor::cos_() const {
2106 return at::_ops::cos_::call(const_cast<Tensor&>(*this));
2107}
2108
2109// aten::cosh(Tensor self) -> Tensor
2110inline at::Tensor Tensor::cosh() const {
2111 return at::_ops::cosh::call(const_cast<Tensor&>(*this));
2112}
2113
2114// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
2115inline at::Tensor & Tensor::cosh_() const {
2116 return at::_ops::cosh_::call(const_cast<Tensor&>(*this));
2117}
2118
2119// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
2120inline at::Tensor Tensor::count_nonzero(at::IntArrayRef dim) const {
2121 return at::_ops::count_nonzero_dim_IntList::call(const_cast<Tensor&>(*this), dim);
2122}
2123
2124// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor
2125inline at::Tensor Tensor::count_nonzero(c10::optional<int64_t> dim) const {
2126 return at::_ops::count_nonzero::call(const_cast<Tensor&>(*this), dim);
2127}
2128
2129// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
2130inline at::Tensor Tensor::cov(int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) const {
2131 return at::_ops::cov::call(const_cast<Tensor&>(*this), correction, fweights, aweights);
2132}
2133
2134// aten::corrcoef(Tensor self) -> Tensor
2135inline at::Tensor Tensor::corrcoef() const {
2136 return at::_ops::corrcoef::call(const_cast<Tensor&>(*this));
2137}
2138
2139// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
2140inline ::std::tuple<at::Tensor,at::Tensor> Tensor::cummax(int64_t dim) const {
2141 return at::_ops::cummax::call(const_cast<Tensor&>(*this), dim);
2142}
2143
2144// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
2145inline ::std::tuple<at::Tensor,at::Tensor> Tensor::cummax(at::Dimname dim) const {
2146 return at::_ops::cummax_dimname::call(const_cast<Tensor&>(*this), dim);
2147}
2148
2149// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
2150inline ::std::tuple<at::Tensor,at::Tensor> Tensor::cummin(int64_t dim) const {
2151 return at::_ops::cummin::call(const_cast<Tensor&>(*this), dim);
2152}
2153
2154// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
2155inline ::std::tuple<at::Tensor,at::Tensor> Tensor::cummin(at::Dimname dim) const {
2156 return at::_ops::cummin_dimname::call(const_cast<Tensor&>(*this), dim);
2157}
2158
2159// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
2160inline at::Tensor Tensor::cumprod(int64_t dim, c10::optional<at::ScalarType> dtype) const {
2161 return at::_ops::cumprod::call(const_cast<Tensor&>(*this), dim, dtype);
2162}
2163
2164// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
2165inline at::Tensor & Tensor::cumprod_(int64_t dim, c10::optional<at::ScalarType> dtype) const {
2166 return at::_ops::cumprod_::call(const_cast<Tensor&>(*this), dim, dtype);
2167}
2168
2169// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
2170inline at::Tensor Tensor::cumprod(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
2171 return at::_ops::cumprod_dimname::call(const_cast<Tensor&>(*this), dim, dtype);
2172}
2173
2174// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
2175inline at::Tensor & Tensor::cumprod_(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
2176 return at::_ops::cumprod__dimname::call(const_cast<Tensor&>(*this), dim, dtype);
2177}
2178
2179// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
2180inline at::Tensor Tensor::cumsum(int64_t dim, c10::optional<at::ScalarType> dtype) const {
2181 return at::_ops::cumsum::call(const_cast<Tensor&>(*this), dim, dtype);
2182}
2183
2184// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
2185inline at::Tensor & Tensor::cumsum_(int64_t dim, c10::optional<at::ScalarType> dtype) const {
2186 return at::_ops::cumsum_::call(const_cast<Tensor&>(*this), dim, dtype);
2187}
2188
2189// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
2190inline at::Tensor Tensor::cumsum(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
2191 return at::_ops::cumsum_dimname::call(const_cast<Tensor&>(*this), dim, dtype);
2192}
2193
2194// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
2195inline at::Tensor & Tensor::cumsum_(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
2196 return at::_ops::cumsum__dimname::call(const_cast<Tensor&>(*this), dim, dtype);
2197}
2198
2199// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
2200inline at::Tensor Tensor::diag_embed(int64_t offset, int64_t dim1, int64_t dim2) const {
2201 return at::_ops::diag_embed::call(const_cast<Tensor&>(*this), offset, dim1, dim2);
2202}
2203
2204// aten::diagflat(Tensor self, int offset=0) -> Tensor
2205inline at::Tensor Tensor::diagflat(int64_t offset) const {
2206 return at::_ops::diagflat::call(const_cast<Tensor&>(*this), offset);
2207}
2208
2209// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
2210inline at::Tensor Tensor::diagonal(int64_t offset, int64_t dim1, int64_t dim2) const {
2211 return at::_ops::diagonal::call(const_cast<Tensor&>(*this), offset, dim1, dim2);
2212}
2213
2214// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
2215inline at::Tensor Tensor::diagonal(at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) const {
2216 return at::_ops::diagonal_Dimname::call(const_cast<Tensor&>(*this), outdim, dim1, dim2, offset);
2217}
2218
2219// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
2220inline at::Tensor & Tensor::fill_diagonal_(const at::Scalar & fill_value, bool wrap) const {
2221 return at::_ops::fill_diagonal_::call(const_cast<Tensor&>(*this), fill_value, wrap);
2222}
2223
2224// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
2225inline at::Tensor Tensor::diff(int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) const {
2226 return at::_ops::diff::call(const_cast<Tensor&>(*this), n, dim, prepend, append);
2227}
2228
2229// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
2230inline at::Tensor Tensor::div(const at::Tensor & other) const {
2231 return at::_ops::div_Tensor::call(const_cast<Tensor&>(*this), other);
2232}
2233
2234// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
2235inline at::Tensor & Tensor::div_(const at::Tensor & other) const {
2236 return at::_ops::div__Tensor::call(const_cast<Tensor&>(*this), other);
2237}
2238
2239// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
2240inline at::Tensor Tensor::div(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const {
2241 return at::_ops::div_Tensor_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
2242}
2243
2244// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
2245inline at::Tensor & Tensor::div_(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const {
2246 return at::_ops::div__Tensor_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
2247}
2248
2249// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
2250inline at::Tensor Tensor::div(const at::Scalar & other) const {
2251 return at::_ops::div_Scalar::call(const_cast<Tensor&>(*this), other);
2252}
2253
2254// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
2255inline at::Tensor & Tensor::div_(const at::Scalar & other) const {
2256 return at::_ops::div__Scalar::call(const_cast<Tensor&>(*this), other);
2257}
2258
2259// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
2260inline at::Tensor Tensor::div(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const {
2261 return at::_ops::div_Scalar_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
2262}
2263
2264// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
2265inline at::Tensor & Tensor::div_(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const {
2266 return at::_ops::div__Scalar_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
2267}
2268
2269// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
2270inline at::Tensor Tensor::divide(const at::Tensor & other) const {
2271 return at::_ops::divide_Tensor::call(const_cast<Tensor&>(*this), other);
2272}
2273
2274// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
2275inline at::Tensor & Tensor::divide_(const at::Tensor & other) const {
2276 return at::_ops::divide__Tensor::call(const_cast<Tensor&>(*this), other);
2277}
2278
2279// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
2280inline at::Tensor Tensor::divide(const at::Scalar & other) const {
2281 return at::_ops::divide_Scalar::call(const_cast<Tensor&>(*this), other);
2282}
2283
2284// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
2285inline at::Tensor & Tensor::divide_(const at::Scalar & other) const {
2286 return at::_ops::divide__Scalar::call(const_cast<Tensor&>(*this), other);
2287}
2288
2289// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
2290inline at::Tensor Tensor::divide(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const {
2291 return at::_ops::divide_Tensor_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
2292}
2293
2294// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
2295inline at::Tensor & Tensor::divide_(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const {
2296 return at::_ops::divide__Tensor_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
2297}
2298
2299// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
2300inline at::Tensor Tensor::divide(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const {
2301 return at::_ops::divide_Scalar_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
2302}
2303
2304// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
2305inline at::Tensor & Tensor::divide_(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const {
2306 return at::_ops::divide__Scalar_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
2307}
2308
2309// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
2310inline at::Tensor Tensor::true_divide(const at::Tensor & other) const {
2311 return at::_ops::true_divide_Tensor::call(const_cast<Tensor&>(*this), other);
2312}
2313
2314// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
2315inline at::Tensor & Tensor::true_divide_(const at::Tensor & other) const {
2316 return at::_ops::true_divide__Tensor::call(const_cast<Tensor&>(*this), other);
2317}
2318
2319// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
2320inline at::Tensor Tensor::true_divide(const at::Scalar & other) const {
2321 return at::_ops::true_divide_Scalar::call(const_cast<Tensor&>(*this), other);
2322}
2323
2324// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
2325inline at::Tensor & Tensor::true_divide_(const at::Scalar & other) const {
2326 return at::_ops::true_divide__Scalar::call(const_cast<Tensor&>(*this), other);
2327}
2328
2329// aten::dot(Tensor self, Tensor tensor) -> Tensor
2330inline at::Tensor Tensor::dot(const at::Tensor & tensor) const {
2331 return at::_ops::dot::call(const_cast<Tensor&>(*this), tensor);
2332}
2333
2334// aten::vdot(Tensor self, Tensor other) -> Tensor
2335inline at::Tensor Tensor::vdot(const at::Tensor & other) const {
2336 return at::_ops::vdot::call(const_cast<Tensor&>(*this), other);
2337}
2338
2339// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2340inline at::Tensor Tensor::new_empty(at::IntArrayRef size, at::TensorOptions options) const {
2341 return at::_ops::new_empty::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2342}
2343
2344// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2345inline at::Tensor Tensor::new_empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2346 return at::_ops::new_empty::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
2347}
2348
2349// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2350inline at::Tensor Tensor::new_empty_symint(c10::SymIntArrayRef size, at::TensorOptions options) const {
2351 return at::_ops::new_empty::call(const_cast<Tensor&>(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2352}
2353
2354// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2355inline at::Tensor Tensor::new_empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2356 return at::_ops::new_empty::call(const_cast<Tensor&>(*this), size, dtype, layout, device, pin_memory);
2357}
2358
2359// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2360inline at::Tensor Tensor::new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options) const {
2361 return at::_ops::new_empty_strided::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2362}
2363
2364// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2365inline at::Tensor Tensor::new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2366 return at::_ops::new_empty_strided::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory);
2367}
2368
2369// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2370inline at::Tensor Tensor::new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) const {
2371 return at::_ops::new_empty_strided::call(const_cast<Tensor&>(*this), size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2372}
2373
2374// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2375inline at::Tensor Tensor::new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2376 return at::_ops::new_empty_strided::call(const_cast<Tensor&>(*this), size, stride, dtype, layout, device, pin_memory);
2377}
2378
2379// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2380inline at::Tensor Tensor::new_full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) const {
2381 return at::_ops::new_full::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2382}
2383
2384// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2385inline at::Tensor Tensor::new_full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2386 return at::_ops::new_full::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
2387}
2388
2389// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2390inline at::Tensor Tensor::new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) const {
2391 return at::_ops::new_full::call(const_cast<Tensor&>(*this), size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2392}
2393
2394// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2395inline at::Tensor Tensor::new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2396 return at::_ops::new_full::call(const_cast<Tensor&>(*this), size, fill_value, dtype, layout, device, pin_memory);
2397}
2398
2399// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2400inline at::Tensor Tensor::new_zeros(at::IntArrayRef size, at::TensorOptions options) const {
2401 return at::_ops::new_zeros::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2402}
2403
2404// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2405inline at::Tensor Tensor::new_zeros(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2406 return at::_ops::new_zeros::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
2407}
2408
2409// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2410inline at::Tensor Tensor::new_zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options) const {
2411 return at::_ops::new_zeros::call(const_cast<Tensor&>(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2412}
2413
2414// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2415inline at::Tensor Tensor::new_zeros_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2416 return at::_ops::new_zeros::call(const_cast<Tensor&>(*this), size, dtype, layout, device, pin_memory);
2417}
2418
2419// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2420inline at::Tensor Tensor::new_ones(at::IntArrayRef size, at::TensorOptions options) const {
2421 return at::_ops::new_ones::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2422}
2423
2424// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2425inline at::Tensor Tensor::new_ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2426 return at::_ops::new_ones::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
2427}
2428
2429// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2430inline at::Tensor Tensor::new_ones_symint(c10::SymIntArrayRef size, at::TensorOptions options) const {
2431 return at::_ops::new_ones::call(const_cast<Tensor&>(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2432}
2433
2434// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2435inline at::Tensor Tensor::new_ones_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
2436 return at::_ops::new_ones::call(const_cast<Tensor&>(*this), size, dtype, layout, device, pin_memory);
2437}
2438
2439// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
2440inline const at::Tensor & Tensor::resize_(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) const {
2441 return at::_ops::resize_::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), memory_format);
2442}
2443
2444// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
2445inline const at::Tensor & Tensor::resize__symint(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) const {
2446 return at::_ops::resize_::call(const_cast<Tensor&>(*this), size, memory_format);
2447}
2448
2449// aten::erf(Tensor self) -> Tensor
2450inline at::Tensor Tensor::erf() const {
2451 return at::_ops::erf::call(const_cast<Tensor&>(*this));
2452}
2453
2454// aten::erf_(Tensor(a!) self) -> Tensor(a!)
2455inline at::Tensor & Tensor::erf_() const {
2456 return at::_ops::erf_::call(const_cast<Tensor&>(*this));
2457}
2458
2459// aten::erfc(Tensor self) -> Tensor
2460inline at::Tensor Tensor::erfc() const {
2461 return at::_ops::erfc::call(const_cast<Tensor&>(*this));
2462}
2463
2464// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
2465inline at::Tensor & Tensor::erfc_() const {
2466 return at::_ops::erfc_::call(const_cast<Tensor&>(*this));
2467}
2468
2469// aten::exp(Tensor self) -> Tensor
2470inline at::Tensor Tensor::exp() const {
2471 return at::_ops::exp::call(const_cast<Tensor&>(*this));
2472}
2473
2474// aten::exp_(Tensor(a!) self) -> Tensor(a!)
2475inline at::Tensor & Tensor::exp_() const {
2476 return at::_ops::exp_::call(const_cast<Tensor&>(*this));
2477}
2478
2479// aten::exp2(Tensor self) -> Tensor
2480inline at::Tensor Tensor::exp2() const {
2481 return at::_ops::exp2::call(const_cast<Tensor&>(*this));
2482}
2483
2484// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
2485inline at::Tensor & Tensor::exp2_() const {
2486 return at::_ops::exp2_::call(const_cast<Tensor&>(*this));
2487}
2488
2489// aten::expm1(Tensor self) -> Tensor
2490inline at::Tensor Tensor::expm1() const {
2491 return at::_ops::expm1::call(const_cast<Tensor&>(*this));
2492}
2493
2494// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
2495inline at::Tensor & Tensor::expm1_() const {
2496 return at::_ops::expm1_::call(const_cast<Tensor&>(*this));
2497}
2498
2499// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
2500inline at::Tensor Tensor::expand(at::IntArrayRef size, bool implicit) const {
2501 return at::_ops::expand::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), implicit);
2502}
2503
2504// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
2505inline at::Tensor Tensor::expand_symint(c10::SymIntArrayRef size, bool implicit) const {
2506 return at::_ops::expand::call(const_cast<Tensor&>(*this), size, implicit);
2507}
2508
2509// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
2510inline at::Tensor Tensor::expand_as(const at::Tensor & other) const {
2511 return at::_ops::expand_as::call(const_cast<Tensor&>(*this), other);
2512}
2513
2514// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
2515inline at::Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim) const {
2516 return at::_ops::flatten_using_ints::call(const_cast<Tensor&>(*this), start_dim, end_dim);
2517}
2518
2519// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
2520inline at::Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim, at::Dimname out_dim) const {
2521 return at::_ops::flatten_named_out_dim::call(const_cast<Tensor&>(*this), start_dim, end_dim, out_dim);
2522}
2523
2524// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
2525inline at::Tensor Tensor::flatten(at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) const {
2526 return at::_ops::flatten_using_names::call(const_cast<Tensor&>(*this), start_dim, end_dim, out_dim);
2527}
2528
2529// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
2530inline at::Tensor Tensor::flatten(at::DimnameList dims, at::Dimname out_dim) const {
2531 return at::_ops::flatten_DimnameList::call(const_cast<Tensor&>(*this), dims, out_dim);
2532}
2533
2534// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
2535inline at::Tensor Tensor::unflatten(int64_t dim, at::IntArrayRef sizes) const {
2536 return at::_ops::unflatten_int::call(const_cast<Tensor&>(*this), dim, sizes);
2537}
2538
2539// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
2540inline at::Tensor Tensor::unflatten(at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) const {
2541 return at::_ops::unflatten_Dimname::call(const_cast<Tensor&>(*this), dim, sizes, names);
2542}
2543
2544// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
2545inline at::Tensor & Tensor::fill_(const at::Scalar & value) const {
2546 return at::_ops::fill__Scalar::call(const_cast<Tensor&>(*this), value);
2547}
2548
2549// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
2550inline at::Tensor & Tensor::fill_(const at::Tensor & value) const {
2551 return at::_ops::fill__Tensor::call(const_cast<Tensor&>(*this), value);
2552}
2553
2554// aten::floor(Tensor self) -> Tensor
2555inline at::Tensor Tensor::floor() const {
2556 return at::_ops::floor::call(const_cast<Tensor&>(*this));
2557}
2558
2559// aten::floor_(Tensor(a!) self) -> Tensor(a!)
2560inline at::Tensor & Tensor::floor_() const {
2561 return at::_ops::floor_::call(const_cast<Tensor&>(*this));
2562}
2563
2564// aten::floor_divide(Tensor self, Tensor other) -> Tensor
2565inline at::Tensor Tensor::floor_divide(const at::Tensor & other) const {
2566 return at::_ops::floor_divide::call(const_cast<Tensor&>(*this), other);
2567}
2568
2569// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
2570inline at::Tensor & Tensor::floor_divide_(const at::Tensor & other) const {
2571 return at::_ops::floor_divide__Tensor::call(const_cast<Tensor&>(*this), other);
2572}
2573
2574// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
2575inline at::Tensor Tensor::floor_divide(const at::Scalar & other) const {
2576 return at::_ops::floor_divide_Scalar::call(const_cast<Tensor&>(*this), other);
2577}
2578
2579// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
2580inline at::Tensor & Tensor::floor_divide_(const at::Scalar & other) const {
2581 return at::_ops::floor_divide__Scalar::call(const_cast<Tensor&>(*this), other);
2582}
2583
2584// aten::frac(Tensor self) -> Tensor
2585inline at::Tensor Tensor::frac() const {
2586 return at::_ops::frac::call(const_cast<Tensor&>(*this));
2587}
2588
2589// aten::frac_(Tensor(a!) self) -> Tensor(a!)
2590inline at::Tensor & Tensor::frac_() const {
2591 return at::_ops::frac_::call(const_cast<Tensor&>(*this));
2592}
2593
2594// aten::gcd(Tensor self, Tensor other) -> Tensor
2595inline at::Tensor Tensor::gcd(const at::Tensor & other) const {
2596 return at::_ops::gcd::call(const_cast<Tensor&>(*this), other);
2597}
2598
2599// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
2600inline at::Tensor & Tensor::gcd_(const at::Tensor & other) const {
2601 return at::_ops::gcd_::call(const_cast<Tensor&>(*this), other);
2602}
2603
2604// aten::lcm(Tensor self, Tensor other) -> Tensor
2605inline at::Tensor Tensor::lcm(const at::Tensor & other) const {
2606 return at::_ops::lcm::call(const_cast<Tensor&>(*this), other);
2607}
2608
2609// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
2610inline at::Tensor & Tensor::lcm_(const at::Tensor & other) const {
2611 return at::_ops::lcm_::call(const_cast<Tensor&>(*this), other);
2612}
2613
2614// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
2615inline at::Tensor Tensor::index(const c10::List<c10::optional<at::Tensor>> & indices) const {
2616 return at::_ops::index_Tensor::call(const_cast<Tensor&>(*this), indices);
2617}
2618
2619// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
2620inline at::Tensor & Tensor::index_copy_(int64_t dim, const at::Tensor & index, const at::Tensor & source) const {
2621 return at::_ops::index_copy_::call(const_cast<Tensor&>(*this), dim, index, source);
2622}
2623
2624// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
2625inline at::Tensor Tensor::index_copy(int64_t dim, const at::Tensor & index, const at::Tensor & source) const {
2626 return at::_ops::index_copy::call(const_cast<Tensor&>(*this), dim, index, source);
2627}
2628
2629// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
2630inline at::Tensor & Tensor::index_copy_(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const {
2631 return at::_ops::index_copy__dimname::call(const_cast<Tensor&>(*this), dim, index, source);
2632}
2633
2634// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
2635inline at::Tensor Tensor::index_copy(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const {
2636 return at::_ops::index_copy_dimname::call(const_cast<Tensor&>(*this), dim, index, source);
2637}
2638
2639// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
2640inline at::Tensor & Tensor::index_put_(const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) const {
2641 return at::_ops::index_put_::call(const_cast<Tensor&>(*this), indices, values, accumulate);
2642}
2643
2644// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
2645inline at::Tensor Tensor::index_put(const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) const {
2646 return at::_ops::index_put::call(const_cast<Tensor&>(*this), indices, values, accumulate);
2647}
2648
2649// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
2650inline at::Tensor Tensor::isclose(const at::Tensor & other, double rtol, double atol, bool equal_nan) const {
2651 return at::_ops::isclose::call(const_cast<Tensor&>(*this), other, rtol, atol, equal_nan);
2652}
2653
2654// aten::isnan(Tensor self) -> Tensor
2655inline at::Tensor Tensor::isnan() const {
2656 return at::_ops::isnan::call(const_cast<Tensor&>(*this));
2657}
2658
2659// aten::is_distributed(Tensor self) -> bool
2660inline bool Tensor::is_distributed() const {
2661 return at::_ops::is_distributed::call(const_cast<Tensor&>(*this));
2662}
2663
2664// aten::is_floating_point(Tensor self) -> bool
2665inline bool Tensor::__dispatch_is_floating_point() const {
2666 return at::_ops::is_floating_point::call(const_cast<Tensor&>(*this));
2667}
2668
2669// aten::is_complex(Tensor self) -> bool
2670inline bool Tensor::__dispatch_is_complex() const {
2671 return at::_ops::is_complex::call(const_cast<Tensor&>(*this));
2672}
2673
2674// aten::is_conj(Tensor self) -> bool
2675inline bool Tensor::__dispatch_is_conj() const {
2676 return at::_ops::is_conj::call(const_cast<Tensor&>(*this));
2677}
2678
2679// aten::_is_zerotensor(Tensor self) -> bool
2680inline bool Tensor::__dispatch__is_zerotensor() const {
2681 return at::_ops::_is_zerotensor::call(const_cast<Tensor&>(*this));
2682}
2683
2684// aten::is_neg(Tensor self) -> bool
2685inline bool Tensor::__dispatch_is_neg() const {
2686 return at::_ops::is_neg::call(const_cast<Tensor&>(*this));
2687}
2688
2689// aten::isreal(Tensor self) -> Tensor
2690inline at::Tensor Tensor::isreal() const {
2691 return at::_ops::isreal::call(const_cast<Tensor&>(*this));
2692}
2693
2694// aten::is_nonzero(Tensor self) -> bool
2695inline bool Tensor::is_nonzero() const {
2696 return at::_ops::is_nonzero::call(const_cast<Tensor&>(*this));
2697}
2698
2699// aten::is_same_size(Tensor self, Tensor other) -> bool
2700inline bool Tensor::is_same_size(const at::Tensor & other) const {
2701 return at::_ops::is_same_size::call(const_cast<Tensor&>(*this), other);
2702}
2703
2704// aten::is_signed(Tensor self) -> bool
2705inline bool Tensor::__dispatch_is_signed() const {
2706 return at::_ops::is_signed::call(const_cast<Tensor&>(*this));
2707}
2708
2709// aten::is_inference(Tensor self) -> bool
2710inline bool Tensor::__dispatch_is_inference() const {
2711 return at::_ops::is_inference::call(const_cast<Tensor&>(*this));
2712}
2713
2714// aten::kron(Tensor self, Tensor other) -> Tensor
2715inline at::Tensor Tensor::kron(const at::Tensor & other) const {
2716 return at::_ops::kron::call(const_cast<Tensor&>(*this), other);
2717}
2718
2719// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
2720inline ::std::tuple<at::Tensor,at::Tensor> Tensor::kthvalue(int64_t k, int64_t dim, bool keepdim) const {
2721 return at::_ops::kthvalue::call(const_cast<Tensor&>(*this), k, dim, keepdim);
2722}
2723
2724// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2725inline ::std::tuple<at::Tensor,at::Tensor> Tensor::kthvalue(int64_t k, at::Dimname dim, bool keepdim) const {
2726 return at::_ops::kthvalue_dimname::call(const_cast<Tensor&>(*this), k, dim, keepdim);
2727}
2728
2729// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
2730inline at::Tensor Tensor::nan_to_num(c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) const {
2731 return at::_ops::nan_to_num::call(const_cast<Tensor&>(*this), nan, posinf, neginf);
2732}
2733
2734// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
2735inline at::Tensor & Tensor::nan_to_num_(c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) const {
2736 return at::_ops::nan_to_num_::call(const_cast<Tensor&>(*this), nan, posinf, neginf);
2737}
2738
2739// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
2740inline at::Tensor Tensor::ldexp(const at::Tensor & other) const {
2741 return at::_ops::ldexp_Tensor::call(const_cast<Tensor&>(*this), other);
2742}
2743
2744// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
2745inline at::Tensor & Tensor::ldexp_(const at::Tensor & other) const {
2746 return at::_ops::ldexp_::call(const_cast<Tensor&>(*this), other);
2747}
2748
2749// aten::log(Tensor self) -> Tensor
2750inline at::Tensor Tensor::log() const {
2751 return at::_ops::log::call(const_cast<Tensor&>(*this));
2752}
2753
2754// aten::log_(Tensor(a!) self) -> Tensor(a!)
2755inline at::Tensor & Tensor::log_() const {
2756 return at::_ops::log_::call(const_cast<Tensor&>(*this));
2757}
2758
2759// aten::log10(Tensor self) -> Tensor
2760inline at::Tensor Tensor::log10() const {
2761 return at::_ops::log10::call(const_cast<Tensor&>(*this));
2762}
2763
2764// aten::log10_(Tensor(a!) self) -> Tensor(a!)
2765inline at::Tensor & Tensor::log10_() const {
2766 return at::_ops::log10_::call(const_cast<Tensor&>(*this));
2767}
2768
2769// aten::log1p(Tensor self) -> Tensor
2770inline at::Tensor Tensor::log1p() const {
2771 return at::_ops::log1p::call(const_cast<Tensor&>(*this));
2772}
2773
2774// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
2775inline at::Tensor & Tensor::log1p_() const {
2776 return at::_ops::log1p_::call(const_cast<Tensor&>(*this));
2777}
2778
2779// aten::log2(Tensor self) -> Tensor
2780inline at::Tensor Tensor::log2() const {
2781 return at::_ops::log2::call(const_cast<Tensor&>(*this));
2782}
2783
2784// aten::log2_(Tensor(a!) self) -> Tensor(a!)
2785inline at::Tensor & Tensor::log2_() const {
2786 return at::_ops::log2_::call(const_cast<Tensor&>(*this));
2787}
2788
2789// aten::logaddexp(Tensor self, Tensor other) -> Tensor
2790inline at::Tensor Tensor::logaddexp(const at::Tensor & other) const {
2791 return at::_ops::logaddexp::call(const_cast<Tensor&>(*this), other);
2792}
2793
2794// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
2795inline at::Tensor Tensor::logaddexp2(const at::Tensor & other) const {
2796 return at::_ops::logaddexp2::call(const_cast<Tensor&>(*this), other);
2797}
2798
2799// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
2800inline at::Tensor Tensor::xlogy(const at::Tensor & other) const {
2801 return at::_ops::xlogy_Tensor::call(const_cast<Tensor&>(*this), other);
2802}
2803
2804// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
2805inline at::Tensor Tensor::xlogy(const at::Scalar & other) const {
2806 return at::_ops::xlogy_Scalar_Other::call(const_cast<Tensor&>(*this), other);
2807}
2808
2809// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
2810inline at::Tensor & Tensor::xlogy_(const at::Tensor & other) const {
2811 return at::_ops::xlogy__Tensor::call(const_cast<Tensor&>(*this), other);
2812}
2813
2814// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
2815inline at::Tensor & Tensor::xlogy_(const at::Scalar & other) const {
2816 return at::_ops::xlogy__Scalar_Other::call(const_cast<Tensor&>(*this), other);
2817}
2818
2819// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
2820inline at::Tensor Tensor::log_softmax(int64_t dim, c10::optional<at::ScalarType> dtype) const {
2821 return at::_ops::log_softmax_int::call(const_cast<Tensor&>(*this), dim, dtype);
2822}
2823
2824// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
2825inline at::Tensor Tensor::log_softmax(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
2826 return at::_ops::log_softmax_Dimname::call(const_cast<Tensor&>(*this), dim, dtype);
2827}
2828
2829// aten::logcumsumexp(Tensor self, int dim) -> Tensor
2830inline at::Tensor Tensor::logcumsumexp(int64_t dim) const {
2831 return at::_ops::logcumsumexp::call(const_cast<Tensor&>(*this), dim);
2832}
2833
2834// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
2835inline at::Tensor Tensor::logcumsumexp(at::Dimname dim) const {
2836 return at::_ops::logcumsumexp_dimname::call(const_cast<Tensor&>(*this), dim);
2837}
2838
2839// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
2840inline at::Tensor Tensor::logsumexp(at::IntArrayRef dim, bool keepdim) const {
2841 return at::_ops::logsumexp::call(const_cast<Tensor&>(*this), dim, keepdim);
2842}
2843
2844// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
2845inline at::Tensor Tensor::logsumexp(at::DimnameList dim, bool keepdim) const {
2846 return at::_ops::logsumexp_names::call(const_cast<Tensor&>(*this), dim, keepdim);
2847}
2848
2849// aten::matmul(Tensor self, Tensor other) -> Tensor
2850inline at::Tensor Tensor::matmul(const at::Tensor & other) const {
2851 return at::_ops::matmul::call(const_cast<Tensor&>(*this), other);
2852}
2853
2854// aten::matrix_power(Tensor self, int n) -> Tensor
2855inline at::Tensor Tensor::matrix_power(int64_t n) const {
2856 return at::_ops::matrix_power::call(const_cast<Tensor&>(*this), n);
2857}
2858
2859// aten::matrix_exp(Tensor self) -> Tensor
2860inline at::Tensor Tensor::matrix_exp() const {
2861 return at::_ops::matrix_exp::call(const_cast<Tensor&>(*this));
2862}
2863
2864// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
2865inline ::std::tuple<at::Tensor,at::Tensor> Tensor::aminmax(c10::optional<int64_t> dim, bool keepdim) const {
2866 return at::_ops::aminmax::call(const_cast<Tensor&>(*this), dim, keepdim);
2867}
2868
2869// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2870inline ::std::tuple<at::Tensor,at::Tensor> Tensor::max(int64_t dim, bool keepdim) const {
2871 return at::_ops::max_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
2872}
2873
2874// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2875inline ::std::tuple<at::Tensor,at::Tensor> Tensor::max(at::Dimname dim, bool keepdim) const {
2876 return at::_ops::max_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
2877}
2878
2879// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
2880inline at::Tensor Tensor::amax(at::IntArrayRef dim, bool keepdim) const {
2881 return at::_ops::amax::call(const_cast<Tensor&>(*this), dim, keepdim);
2882}
2883
2884// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
2885inline at::Tensor Tensor::mean(c10::optional<at::ScalarType> dtype) const {
2886 return at::_ops::mean::call(const_cast<Tensor&>(*this), dtype);
2887}
2888
2889// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
2890inline at::Tensor Tensor::mean(at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
2891 return at::_ops::mean_dim::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
2892}
2893
2894// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
2895inline at::Tensor Tensor::mean(at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
2896 return at::_ops::mean_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
2897}
2898
2899// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
2900inline at::Tensor Tensor::nanmean(at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
2901 return at::_ops::nanmean::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
2902}
2903
2904// aten::median(Tensor self) -> Tensor
2905inline at::Tensor Tensor::median() const {
2906 return at::_ops::median::call(const_cast<Tensor&>(*this));
2907}
2908
2909// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2910inline ::std::tuple<at::Tensor,at::Tensor> Tensor::median(int64_t dim, bool keepdim) const {
2911 return at::_ops::median_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
2912}
2913
2914// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2915inline ::std::tuple<at::Tensor,at::Tensor> Tensor::median(at::Dimname dim, bool keepdim) const {
2916 return at::_ops::median_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
2917}
2918
2919// aten::nanmedian(Tensor self) -> Tensor
2920inline at::Tensor Tensor::nanmedian() const {
2921 return at::_ops::nanmedian::call(const_cast<Tensor&>(*this));
2922}
2923
2924// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2925inline ::std::tuple<at::Tensor,at::Tensor> Tensor::nanmedian(int64_t dim, bool keepdim) const {
2926 return at::_ops::nanmedian_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
2927}
2928
2929// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2930inline ::std::tuple<at::Tensor,at::Tensor> Tensor::nanmedian(at::Dimname dim, bool keepdim) const {
2931 return at::_ops::nanmedian_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
2932}
2933
2934// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2935inline ::std::tuple<at::Tensor,at::Tensor> Tensor::min(int64_t dim, bool keepdim) const {
2936 return at::_ops::min_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
2937}
2938
2939// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2940inline ::std::tuple<at::Tensor,at::Tensor> Tensor::min(at::Dimname dim, bool keepdim) const {
2941 return at::_ops::min_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
2942}
2943
2944// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
2945inline at::Tensor Tensor::amin(at::IntArrayRef dim, bool keepdim) const {
2946 return at::_ops::amin::call(const_cast<Tensor&>(*this), dim, keepdim);
2947}
2948
2949// aten::mm(Tensor self, Tensor mat2) -> Tensor
2950inline at::Tensor Tensor::mm(const at::Tensor & mat2) const {
2951 return at::_ops::mm::call(const_cast<Tensor&>(*this), mat2);
2952}
2953
2954// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
2955inline ::std::tuple<at::Tensor,at::Tensor> Tensor::mode(int64_t dim, bool keepdim) const {
2956 return at::_ops::mode::call(const_cast<Tensor&>(*this), dim, keepdim);
2957}
2958
2959// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
2960inline ::std::tuple<at::Tensor,at::Tensor> Tensor::mode(at::Dimname dim, bool keepdim) const {
2961 return at::_ops::mode_dimname::call(const_cast<Tensor&>(*this), dim, keepdim);
2962}
2963
2964// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
2965inline at::Tensor Tensor::mul(const at::Tensor & other) const {
2966 return at::_ops::mul_Tensor::call(const_cast<Tensor&>(*this), other);
2967}
2968
2969// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
2970inline at::Tensor & Tensor::mul_(const at::Tensor & other) const {
2971 return at::_ops::mul__Tensor::call(const_cast<Tensor&>(*this), other);
2972}
2973
2974// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
2975inline at::Tensor Tensor::mul(const at::Scalar & other) const {
2976 return at::_ops::mul_Scalar::call(const_cast<Tensor&>(*this), other);
2977}
2978
2979// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
2980inline at::Tensor & Tensor::mul_(const at::Scalar & other) const {
2981 return at::_ops::mul__Scalar::call(const_cast<Tensor&>(*this), other);
2982}
2983
2984// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
2985inline at::Tensor Tensor::multiply(const at::Tensor & other) const {
2986 return at::_ops::multiply_Tensor::call(const_cast<Tensor&>(*this), other);
2987}
2988
2989// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
2990inline at::Tensor & Tensor::multiply_(const at::Tensor & other) const {
2991 return at::_ops::multiply__Tensor::call(const_cast<Tensor&>(*this), other);
2992}
2993
2994// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
2995inline at::Tensor Tensor::multiply(const at::Scalar & other) const {
2996 return at::_ops::multiply_Scalar::call(const_cast<Tensor&>(*this), other);
2997}
2998
2999// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
3000inline at::Tensor & Tensor::multiply_(const at::Scalar & other) const {
3001 return at::_ops::multiply__Scalar::call(const_cast<Tensor&>(*this), other);
3002}
3003
3004// aten::mv(Tensor self, Tensor vec) -> Tensor
3005inline at::Tensor Tensor::mv(const at::Tensor & vec) const {
3006 return at::_ops::mv::call(const_cast<Tensor&>(*this), vec);
3007}
3008
3009// aten::mvlgamma(Tensor self, int p) -> Tensor
3010inline at::Tensor Tensor::mvlgamma(int64_t p) const {
3011 return at::_ops::mvlgamma::call(const_cast<Tensor&>(*this), p);
3012}
3013
3014// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
3015inline at::Tensor & Tensor::mvlgamma_(int64_t p) const {
3016 return at::_ops::mvlgamma_::call(const_cast<Tensor&>(*this), p);
3017}
3018
3019// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
3020inline at::Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const {
3021 return at::_ops::narrow_copy::call(const_cast<Tensor&>(*this), dim, start, length);
3022}
3023
3024// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
3025inline at::Tensor Tensor::narrow_copy_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const {
3026 return at::_ops::narrow_copy::call(const_cast<Tensor&>(*this), dim, start, length);
3027}
3028
3029// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
3030inline at::Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const {
3031 return at::_ops::narrow::call(const_cast<Tensor&>(*this), dim, start, length);
3032}
3033
3034// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
3035inline at::Tensor Tensor::narrow_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const {
3036 return at::_ops::narrow::call(const_cast<Tensor&>(*this), dim, start, length);
3037}
3038
3039// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
3040inline at::Tensor Tensor::narrow(int64_t dim, const at::Tensor & start, int64_t length) const {
3041 return at::_ops::narrow_Tensor::call(const_cast<Tensor&>(*this), dim, start, length);
3042}
3043
3044// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
3045inline at::Tensor Tensor::narrow_symint(int64_t dim, const at::Tensor & start, c10::SymInt length) const {
3046 return at::_ops::narrow_Tensor::call(const_cast<Tensor&>(*this), dim, start, length);
3047}
3048
3049// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
3050inline at::Tensor Tensor::permute(at::IntArrayRef dims) const {
3051 return at::_ops::permute::call(const_cast<Tensor&>(*this), dims);
3052}
3053
3054// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
3055inline at::Tensor Tensor::movedim(at::IntArrayRef source, at::IntArrayRef destination) const {
3056 return at::_ops::movedim_intlist::call(const_cast<Tensor&>(*this), source, destination);
3057}
3058
3059// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
3060inline at::Tensor Tensor::movedim(int64_t source, int64_t destination) const {
3061 return at::_ops::movedim_int::call(const_cast<Tensor&>(*this), source, destination);
3062}
3063
3064// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
3065inline at::Tensor Tensor::moveaxis(at::IntArrayRef source, at::IntArrayRef destination) const {
3066 return at::_ops::moveaxis_intlist::call(const_cast<Tensor&>(*this), source, destination);
3067}
3068
3069// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
3070inline at::Tensor Tensor::moveaxis(int64_t source, int64_t destination) const {
3071 return at::_ops::moveaxis_int::call(const_cast<Tensor&>(*this), source, destination);
3072}
3073
3074// aten::numpy_T(Tensor(a) self) -> Tensor(a)
3075inline at::Tensor Tensor::numpy_T() const {
3076 return at::_ops::numpy_T::call(const_cast<Tensor&>(*this));
3077}
3078
3079// aten::matrix_H(Tensor(a) self) -> Tensor(a)
3080inline at::Tensor Tensor::matrix_H() const {
3081 return at::_ops::matrix_H::call(const_cast<Tensor&>(*this));
3082}
3083
3084// aten::mT(Tensor(a) self) -> Tensor(a)
3085inline at::Tensor Tensor::mT() const {
3086 return at::_ops::mT::call(const_cast<Tensor&>(*this));
3087}
3088
3089// aten::mH(Tensor(a) self) -> Tensor(a)
3090inline at::Tensor Tensor::mH() const {
3091 return at::_ops::mH::call(const_cast<Tensor&>(*this));
3092}
3093
3094// aten::adjoint(Tensor(a) self) -> Tensor(a)
3095inline at::Tensor Tensor::adjoint() const {
3096 return at::_ops::adjoint::call(const_cast<Tensor&>(*this));
3097}
3098
3099// aten::is_pinned(Tensor self, Device? device=None) -> bool
3100inline bool Tensor::is_pinned(c10::optional<at::Device> device) const {
3101 return at::_ops::is_pinned::call(const_cast<Tensor&>(*this), device);
3102}
3103
3104// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
3105inline at::Tensor Tensor::pin_memory(c10::optional<at::Device> device) const {
3106 return at::_ops::pin_memory::call(const_cast<Tensor&>(*this), device);
3107}
3108
3109// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
3110inline at::Tensor Tensor::pinverse(double rcond) const {
3111 return at::_ops::pinverse::call(const_cast<Tensor&>(*this), rcond);
3112}
3113
3114// aten::rad2deg(Tensor self) -> Tensor
3115inline at::Tensor Tensor::rad2deg() const {
3116 return at::_ops::rad2deg::call(const_cast<Tensor&>(*this));
3117}
3118
3119// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
3120inline at::Tensor & Tensor::rad2deg_() const {
3121 return at::_ops::rad2deg_::call(const_cast<Tensor&>(*this));
3122}
3123
3124// aten::deg2rad(Tensor self) -> Tensor
3125inline at::Tensor Tensor::deg2rad() const {
3126 return at::_ops::deg2rad::call(const_cast<Tensor&>(*this));
3127}
3128
3129// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
3130inline at::Tensor & Tensor::deg2rad_() const {
3131 return at::_ops::deg2rad_::call(const_cast<Tensor&>(*this));
3132}
3133
3134// aten::ravel(Tensor(a) self) -> Tensor(a)
3135inline at::Tensor Tensor::ravel() const {
3136 return at::_ops::ravel::call(const_cast<Tensor&>(*this));
3137}
3138
3139// aten::reciprocal(Tensor self) -> Tensor
3140inline at::Tensor Tensor::reciprocal() const {
3141 return at::_ops::reciprocal::call(const_cast<Tensor&>(*this));
3142}
3143
3144// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
3145inline at::Tensor & Tensor::reciprocal_() const {
3146 return at::_ops::reciprocal_::call(const_cast<Tensor&>(*this));
3147}
3148
3149// aten::neg(Tensor self) -> Tensor
3150inline at::Tensor Tensor::neg() const {
3151 return at::_ops::neg::call(const_cast<Tensor&>(*this));
3152}
3153
3154// aten::neg_(Tensor(a!) self) -> Tensor(a!)
3155inline at::Tensor & Tensor::neg_() const {
3156 return at::_ops::neg_::call(const_cast<Tensor&>(*this));
3157}
3158
3159// aten::negative(Tensor self) -> Tensor
3160inline at::Tensor Tensor::negative() const {
3161 return at::_ops::negative::call(const_cast<Tensor&>(*this));
3162}
3163
3164// aten::negative_(Tensor(a!) self) -> Tensor(a!)
3165inline at::Tensor & Tensor::negative_() const {
3166 return at::_ops::negative_::call(const_cast<Tensor&>(*this));
3167}
3168
3169// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
3170inline at::Tensor Tensor::repeat(at::IntArrayRef repeats) const {
3171 return at::_ops::repeat::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(repeats));
3172}
3173
3174// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
3175inline at::Tensor Tensor::repeat_symint(c10::SymIntArrayRef repeats) const {
3176 return at::_ops::repeat::call(const_cast<Tensor&>(*this), repeats);
3177}
3178
3179// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
3180inline at::Tensor Tensor::repeat_interleave(const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) const {
3181 return at::_ops::repeat_interleave_self_Tensor::call(const_cast<Tensor&>(*this), repeats, dim, output_size);
3182}
3183
3184// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
3185inline at::Tensor Tensor::repeat_interleave(int64_t repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) const {
3186 return at::_ops::repeat_interleave_self_int::call(const_cast<Tensor&>(*this), repeats, dim, output_size);
3187}
3188
3189// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
3190inline at::Tensor Tensor::repeat_interleave_symint(c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) const {
3191 return at::_ops::repeat_interleave_self_int::call(const_cast<Tensor&>(*this), repeats, dim, output_size);
3192}
3193
3194// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
3195inline at::Tensor Tensor::reshape(at::IntArrayRef shape) const {
3196 return at::_ops::reshape::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(shape));
3197}
3198
3199// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
3200inline at::Tensor Tensor::reshape_symint(c10::SymIntArrayRef shape) const {
3201 return at::_ops::reshape::call(const_cast<Tensor&>(*this), shape);
3202}
3203
3204// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
3205inline at::Tensor Tensor::_reshape_alias(at::IntArrayRef size, at::IntArrayRef stride) const {
3206 return at::_ops::_reshape_alias::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
3207}
3208
3209// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
3210inline at::Tensor Tensor::_reshape_alias_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const {
3211 return at::_ops::_reshape_alias::call(const_cast<Tensor&>(*this), size, stride);
3212}
3213
3214// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
3215inline at::Tensor Tensor::reshape_as(const at::Tensor & other) const {
3216 return at::_ops::reshape_as::call(const_cast<Tensor&>(*this), other);
3217}
3218
3219// aten::round(Tensor self) -> Tensor
3220inline at::Tensor Tensor::round() const {
3221 return at::_ops::round::call(const_cast<Tensor&>(*this));
3222}
3223
3224// aten::round_(Tensor(a!) self) -> Tensor(a!)
3225inline at::Tensor & Tensor::round_() const {
3226 return at::_ops::round_::call(const_cast<Tensor&>(*this));
3227}
3228
3229// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
3230inline at::Tensor Tensor::round(int64_t decimals) const {
3231 return at::_ops::round_decimals::call(const_cast<Tensor&>(*this), decimals);
3232}
3233
3234// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
3235inline at::Tensor & Tensor::round_(int64_t decimals) const {
3236 return at::_ops::round__decimals::call(const_cast<Tensor&>(*this), decimals);
3237}
3238
3239// aten::relu(Tensor self) -> Tensor
3240inline at::Tensor Tensor::relu() const {
3241 return at::_ops::relu::call(const_cast<Tensor&>(*this));
3242}
3243
3244// aten::relu_(Tensor(a!) self) -> Tensor(a!)
3245inline at::Tensor & Tensor::relu_() const {
3246 return at::_ops::relu_::call(const_cast<Tensor&>(*this));
3247}
3248
3249// aten::prelu(Tensor self, Tensor weight) -> Tensor
3250inline at::Tensor Tensor::prelu(const at::Tensor & weight) const {
3251 return at::_ops::prelu::call(const_cast<Tensor&>(*this), weight);
3252}
3253
3254// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
3255inline at::Tensor Tensor::hardshrink(const at::Scalar & lambd) const {
3256 return at::_ops::hardshrink::call(const_cast<Tensor&>(*this), lambd);
3257}
3258
3259// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
3260inline at::Tensor Tensor::hardshrink_backward(const at::Tensor & grad_out, const at::Scalar & lambd) const {
3261 return at::_ops::hardshrink_backward::call(grad_out, const_cast<Tensor&>(*this), lambd);
3262}
3263
3264// aten::rsqrt(Tensor self) -> Tensor
3265inline at::Tensor Tensor::rsqrt() const {
3266 return at::_ops::rsqrt::call(const_cast<Tensor&>(*this));
3267}
3268
3269// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
3270inline at::Tensor & Tensor::rsqrt_() const {
3271 return at::_ops::rsqrt_::call(const_cast<Tensor&>(*this));
3272}
3273
3274// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
3275inline at::Tensor Tensor::select(at::Dimname dim, int64_t index) const {
3276 return at::_ops::select_Dimname::call(const_cast<Tensor&>(*this), dim, index);
3277}
3278
3279// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
3280inline at::Tensor Tensor::select(int64_t dim, int64_t index) const {
3281 return at::_ops::select_int::call(const_cast<Tensor&>(*this), dim, index);
3282}
3283
3284// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
3285inline at::Tensor Tensor::select_symint(int64_t dim, c10::SymInt index) const {
3286 return at::_ops::select_int::call(const_cast<Tensor&>(*this), dim, index);
3287}
3288
3289// aten::sigmoid(Tensor self) -> Tensor
3290inline at::Tensor Tensor::sigmoid() const {
3291 return at::_ops::sigmoid::call(const_cast<Tensor&>(*this));
3292}
3293
3294// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
3295inline at::Tensor & Tensor::sigmoid_() const {
3296 return at::_ops::sigmoid_::call(const_cast<Tensor&>(*this));
3297}
3298
3299// aten::logit(Tensor self, float? eps=None) -> Tensor
3300inline at::Tensor Tensor::logit(c10::optional<double> eps) const {
3301 return at::_ops::logit::call(const_cast<Tensor&>(*this), eps);
3302}
3303
3304// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
3305inline at::Tensor & Tensor::logit_(c10::optional<double> eps) const {
3306 return at::_ops::logit_::call(const_cast<Tensor&>(*this), eps);
3307}
3308
3309// aten::sin(Tensor self) -> Tensor
3310inline at::Tensor Tensor::sin() const {
3311 return at::_ops::sin::call(const_cast<Tensor&>(*this));
3312}
3313
3314// aten::sin_(Tensor(a!) self) -> Tensor(a!)
3315inline at::Tensor & Tensor::sin_() const {
3316 return at::_ops::sin_::call(const_cast<Tensor&>(*this));
3317}
3318
3319// aten::sinc(Tensor self) -> Tensor
3320inline at::Tensor Tensor::sinc() const {
3321 return at::_ops::sinc::call(const_cast<Tensor&>(*this));
3322}
3323
3324// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
3325inline at::Tensor & Tensor::sinc_() const {
3326 return at::_ops::sinc_::call(const_cast<Tensor&>(*this));
3327}
3328
3329// aten::sinh(Tensor self) -> Tensor
3330inline at::Tensor Tensor::sinh() const {
3331 return at::_ops::sinh::call(const_cast<Tensor&>(*this));
3332}
3333
3334// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
3335inline at::Tensor & Tensor::sinh_() const {
3336 return at::_ops::sinh_::call(const_cast<Tensor&>(*this));
3337}
3338
3339// aten::detach(Tensor(a) self) -> Tensor(a)
3340inline at::Tensor Tensor::detach() const {
3341 return at::_ops::detach::call(const_cast<Tensor&>(*this));
3342}
3343
3344// aten::detach_(Tensor(a!) self) -> Tensor(a!)
3345inline at::Tensor & Tensor::detach_() const {
3346 return at::_ops::detach_::call(const_cast<Tensor&>(*this));
3347}
3348
3349// aten::size.Dimname(Tensor self, Dimname dim) -> int
3350inline int64_t Tensor::size(at::Dimname dim) const {
3351 return at::_ops::size_Dimname::call(const_cast<Tensor&>(*this), dim);
3352}
3353
3354// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
3355inline at::Tensor Tensor::slice(int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) const {
3356 return at::_ops::slice_Tensor::call(const_cast<Tensor&>(*this), dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
3357}
3358
3359// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
3360inline at::Tensor Tensor::slice_symint(int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) const {
3361 return at::_ops::slice_Tensor::call(const_cast<Tensor&>(*this), dim, start, end, step);
3362}
3363
3364// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
3365inline at::Tensor Tensor::slice_scatter(const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) const {
3366 return at::_ops::slice_scatter::call(const_cast<Tensor&>(*this), src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
3367}
3368
3369// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
3370inline at::Tensor Tensor::slice_scatter_symint(const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) const {
3371 return at::_ops::slice_scatter::call(const_cast<Tensor&>(*this), src, dim, start, end, step);
3372}
3373
3374// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
3375inline at::Tensor Tensor::select_scatter(const at::Tensor & src, int64_t dim, int64_t index) const {
3376 return at::_ops::select_scatter::call(const_cast<Tensor&>(*this), src, dim, index);
3377}
3378
3379// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
3380inline at::Tensor Tensor::select_scatter_symint(const at::Tensor & src, int64_t dim, c10::SymInt index) const {
3381 return at::_ops::select_scatter::call(const_cast<Tensor&>(*this), src, dim, index);
3382}
3383
3384// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
3385inline at::Tensor Tensor::diagonal_scatter(const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) const {
3386 return at::_ops::diagonal_scatter::call(const_cast<Tensor&>(*this), src, offset, dim1, dim2);
3387}
3388
3389// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
3390inline at::Tensor Tensor::as_strided_scatter(const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) const {
3391 return at::_ops::as_strided_scatter::call(const_cast<Tensor&>(*this), src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
3392}
3393
3394// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
3395inline at::Tensor Tensor::as_strided_scatter_symint(const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) const {
3396 return at::_ops::as_strided_scatter::call(const_cast<Tensor&>(*this), src, size, stride, storage_offset);
3397}
3398
3399// aten::smm(Tensor self, Tensor mat2) -> Tensor
3400inline at::Tensor Tensor::smm(const at::Tensor & mat2) const {
3401 return at::_ops::smm::call(const_cast<Tensor&>(*this), mat2);
3402}
3403
3404// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
3405inline at::Tensor Tensor::softmax(int64_t dim, c10::optional<at::ScalarType> dtype) const {
3406 return at::_ops::softmax_int::call(const_cast<Tensor&>(*this), dim, dtype);
3407}
3408
3409// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
3410inline at::Tensor Tensor::softmax(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
3411 return at::_ops::softmax_Dimname::call(const_cast<Tensor&>(*this), dim, dtype);
3412}
3413
3414// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
3415inline ::std::vector<at::Tensor> Tensor::unsafe_split(int64_t split_size, int64_t dim) const {
3416 return at::_ops::unsafe_split_Tensor::call(const_cast<Tensor&>(*this), split_size, dim);
3417}
3418
3419// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
3420inline ::std::vector<at::Tensor> Tensor::unsafe_split_symint(c10::SymInt split_size, int64_t dim) const {
3421 return at::_ops::unsafe_split_Tensor::call(const_cast<Tensor&>(*this), split_size, dim);
3422}
3423
3424// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
3425inline ::std::vector<at::Tensor> Tensor::split(int64_t split_size, int64_t dim) const {
3426 return at::_ops::split_Tensor::call(const_cast<Tensor&>(*this), split_size, dim);
3427}
3428
3429// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
3430inline ::std::vector<at::Tensor> Tensor::split_symint(c10::SymInt split_size, int64_t dim) const {
3431 return at::_ops::split_Tensor::call(const_cast<Tensor&>(*this), split_size, dim);
3432}
3433
3434// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
3435inline ::std::vector<at::Tensor> Tensor::split(at::IntArrayRef split_size, int64_t dim) const {
3436 return at::_ops::split_sizes::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(split_size), dim);
3437}
3438
3439// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
3440inline ::std::vector<at::Tensor> Tensor::split_symint(c10::SymIntArrayRef split_size, int64_t dim) const {
3441 return at::_ops::split_sizes::call(const_cast<Tensor&>(*this), split_size, dim);
3442}
3443
3444// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
3445inline ::std::vector<at::Tensor> Tensor::unsafe_split_with_sizes(at::IntArrayRef split_sizes, int64_t dim) const {
3446 return at::_ops::unsafe_split_with_sizes::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(split_sizes), dim);
3447}
3448
3449// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
3450inline ::std::vector<at::Tensor> Tensor::unsafe_split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim) const {
3451 return at::_ops::unsafe_split_with_sizes::call(const_cast<Tensor&>(*this), split_sizes, dim);
3452}
3453
3454// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
3455inline ::std::vector<at::Tensor> Tensor::split_with_sizes(at::IntArrayRef split_sizes, int64_t dim) const {
3456 return at::_ops::split_with_sizes::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(split_sizes), dim);
3457}
3458
3459// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
3460inline ::std::vector<at::Tensor> Tensor::split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim) const {
3461 return at::_ops::split_with_sizes::call(const_cast<Tensor&>(*this), split_sizes, dim);
3462}
3463
3464// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
3465inline ::std::vector<at::Tensor> Tensor::hsplit(int64_t sections) const {
3466 return at::_ops::hsplit_int::call(const_cast<Tensor&>(*this), sections);
3467}
3468
3469// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
3470inline ::std::vector<at::Tensor> Tensor::hsplit(at::IntArrayRef indices) const {
3471 return at::_ops::hsplit_array::call(const_cast<Tensor&>(*this), indices);
3472}
3473
3474// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
3475inline ::std::vector<at::Tensor> Tensor::vsplit(int64_t sections) const {
3476 return at::_ops::vsplit_int::call(const_cast<Tensor&>(*this), sections);
3477}
3478
3479// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
3480inline ::std::vector<at::Tensor> Tensor::vsplit(at::IntArrayRef indices) const {
3481 return at::_ops::vsplit_array::call(const_cast<Tensor&>(*this), indices);
3482}
3483
3484// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
3485inline ::std::vector<at::Tensor> Tensor::dsplit(int64_t sections) const {
3486 return at::_ops::dsplit_int::call(const_cast<Tensor&>(*this), sections);
3487}
3488
3489// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
3490inline ::std::vector<at::Tensor> Tensor::dsplit(at::IntArrayRef indices) const {
3491 return at::_ops::dsplit_array::call(const_cast<Tensor&>(*this), indices);
3492}
3493
3494// aten::squeeze(Tensor(a) self) -> Tensor(a)
3495inline at::Tensor Tensor::squeeze() const {
3496 return at::_ops::squeeze::call(const_cast<Tensor&>(*this));
3497}
3498
3499// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
3500inline at::Tensor Tensor::squeeze(int64_t dim) const {
3501 return at::_ops::squeeze_dim::call(const_cast<Tensor&>(*this), dim);
3502}
3503
3504// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
3505inline at::Tensor Tensor::squeeze(at::Dimname dim) const {
3506 return at::_ops::squeeze_dimname::call(const_cast<Tensor&>(*this), dim);
3507}
3508
3509// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
3510inline at::Tensor Tensor::squeeze(at::IntArrayRef dim) const {
3511 return at::_ops::squeeze_dims::call(const_cast<Tensor&>(*this), dim);
3512}
3513
3514// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
3515inline at::Tensor & Tensor::squeeze_() const {
3516 return at::_ops::squeeze_::call(const_cast<Tensor&>(*this));
3517}
3518
3519// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
3520inline at::Tensor & Tensor::squeeze_(int64_t dim) const {
3521 return at::_ops::squeeze__dim::call(const_cast<Tensor&>(*this), dim);
3522}
3523
3524// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
3525inline at::Tensor & Tensor::squeeze_(at::IntArrayRef dim) const {
3526 return at::_ops::squeeze__dims::call(const_cast<Tensor&>(*this), dim);
3527}
3528
3529// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
3530inline at::Tensor & Tensor::squeeze_(at::Dimname dim) const {
3531 return at::_ops::squeeze__dimname::call(const_cast<Tensor&>(*this), dim);
3532}
3533
3534// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
3535inline at::Tensor Tensor::sspaddmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const {
3536 return at::_ops::sspaddmm::call(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
3537}
3538
3539// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
3540inline at::Tensor Tensor::stft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) const {
3541 return at::_ops::stft::call(const_cast<Tensor&>(*this), n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
3542}
3543
3544// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
3545inline at::Tensor Tensor::stft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) const {
3546 return at::_ops::stft_center::call(const_cast<Tensor&>(*this), n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
3547}
3548
3549// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
3550inline at::Tensor Tensor::istft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) const {
3551 return at::_ops::istft::call(const_cast<Tensor&>(*this), n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
3552}
3553
3554// aten::stride.Dimname(Tensor self, Dimname dim) -> int
3555inline int64_t Tensor::stride(at::Dimname dim) const {
3556 return at::_ops::stride_Dimname::call(const_cast<Tensor&>(*this), dim);
3557}
3558
3559// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
3560inline at::Tensor Tensor::sum(c10::optional<at::ScalarType> dtype) const {
3561 return at::_ops::sum::call(const_cast<Tensor&>(*this), dtype);
3562}
3563
3564// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
3565inline at::Tensor Tensor::sum(at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
3566 return at::_ops::sum_dim_IntList::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
3567}
3568
3569// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
3570inline at::Tensor Tensor::sum(at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
3571 return at::_ops::sum_dim_DimnameList::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
3572}
3573
3574// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
3575inline at::Tensor Tensor::nansum(at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
3576 return at::_ops::nansum::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
3577}
3578
3579// aten::sum_to_size(Tensor self, int[] size) -> Tensor
3580inline at::Tensor Tensor::sum_to_size(at::IntArrayRef size) const {
3581 return at::_ops::sum_to_size::call(const_cast<Tensor&>(*this), size);
3582}
3583
3584// aten::sqrt(Tensor self) -> Tensor
3585inline at::Tensor Tensor::sqrt() const {
3586 return at::_ops::sqrt::call(const_cast<Tensor&>(*this));
3587}
3588
3589// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
3590inline at::Tensor & Tensor::sqrt_() const {
3591 return at::_ops::sqrt_::call(const_cast<Tensor&>(*this));
3592}
3593
3594// aten::square(Tensor self) -> Tensor
3595inline at::Tensor Tensor::square() const {
3596 return at::_ops::square::call(const_cast<Tensor&>(*this));
3597}
3598
3599// aten::square_(Tensor(a!) self) -> Tensor(a!)
3600inline at::Tensor & Tensor::square_() const {
3601 return at::_ops::square_::call(const_cast<Tensor&>(*this));
3602}
3603
3604// aten::std(Tensor self, bool unbiased=True) -> Tensor
3605inline at::Tensor Tensor::std(bool unbiased) const {
3606 return at::_ops::std::call(const_cast<Tensor&>(*this), unbiased);
3607}
3608
3609// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
3610inline at::Tensor Tensor::std(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) const {
3611 return at::_ops::std_dim::call(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
3612}
3613
3614// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
3615inline at::Tensor Tensor::std(at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) const {
3616 return at::_ops::std_correction::call(const_cast<Tensor&>(*this), dim, correction, keepdim);
3617}
3618
3619// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
3620inline at::Tensor Tensor::std(at::DimnameList dim, bool unbiased, bool keepdim) const {
3621 return at::_ops::std_names_dim::call(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
3622}
3623
3624// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
3625inline at::Tensor Tensor::std(at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) const {
3626 return at::_ops::std_correction_names::call(const_cast<Tensor&>(*this), dim, correction, keepdim);
3627}
3628
3629// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
3630inline at::Tensor Tensor::prod(c10::optional<at::ScalarType> dtype) const {
3631 return at::_ops::prod::call(const_cast<Tensor&>(*this), dtype);
3632}
3633
3634// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
3635inline at::Tensor Tensor::prod(int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
3636 return at::_ops::prod_dim_int::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
3637}
3638
3639// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
3640inline at::Tensor Tensor::prod(at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
3641 return at::_ops::prod_dim_Dimname::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
3642}
3643
3644// aten::t(Tensor(a) self) -> Tensor(a)
3645inline at::Tensor Tensor::t() const {
3646 return at::_ops::t::call(const_cast<Tensor&>(*this));
3647}
3648
3649// aten::t_(Tensor(a!) self) -> Tensor(a!)
3650inline at::Tensor & Tensor::t_() const {
3651 return at::_ops::t_::call(const_cast<Tensor&>(*this));
3652}
3653
3654// aten::tan(Tensor self) -> Tensor
3655inline at::Tensor Tensor::tan() const {
3656 return at::_ops::tan::call(const_cast<Tensor&>(*this));
3657}
3658
3659// aten::tan_(Tensor(a!) self) -> Tensor(a!)
3660inline at::Tensor & Tensor::tan_() const {
3661 return at::_ops::tan_::call(const_cast<Tensor&>(*this));
3662}
3663
3664// aten::tanh(Tensor self) -> Tensor
3665inline at::Tensor Tensor::tanh() const {
3666 return at::_ops::tanh::call(const_cast<Tensor&>(*this));
3667}
3668
3669// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
3670inline at::Tensor & Tensor::tanh_() const {
3671 return at::_ops::tanh_::call(const_cast<Tensor&>(*this));
3672}
3673
3674// aten::tile(Tensor self, int[] dims) -> Tensor
3675inline at::Tensor Tensor::tile(at::IntArrayRef dims) const {
3676 return at::_ops::tile::call(const_cast<Tensor&>(*this), dims);
3677}
3678
3679// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
3680inline at::Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const {
3681 return at::_ops::transpose_int::call(const_cast<Tensor&>(*this), dim0, dim1);
3682}
3683
3684// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
3685inline at::Tensor Tensor::transpose(at::Dimname dim0, at::Dimname dim1) const {
3686 return at::_ops::transpose_Dimname::call(const_cast<Tensor&>(*this), dim0, dim1);
3687}
3688
3689// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
3690inline at::Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) const {
3691 return at::_ops::transpose_::call(const_cast<Tensor&>(*this), dim0, dim1);
3692}
3693
3694// aten::flip(Tensor self, int[] dims) -> Tensor
3695inline at::Tensor Tensor::flip(at::IntArrayRef dims) const {
3696 return at::_ops::flip::call(const_cast<Tensor&>(*this), dims);
3697}
3698
3699// aten::fliplr(Tensor self) -> Tensor
3700inline at::Tensor Tensor::fliplr() const {
3701 return at::_ops::fliplr::call(const_cast<Tensor&>(*this));
3702}
3703
3704// aten::flipud(Tensor self) -> Tensor
3705inline at::Tensor Tensor::flipud() const {
3706 return at::_ops::flipud::call(const_cast<Tensor&>(*this));
3707}
3708
3709// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
3710inline at::Tensor Tensor::roll(at::IntArrayRef shifts, at::IntArrayRef dims) const {
3711 return at::_ops::roll::call(const_cast<Tensor&>(*this), shifts, dims);
3712}
3713
3714// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
3715inline at::Tensor Tensor::rot90(int64_t k, at::IntArrayRef dims) const {
3716 return at::_ops::rot90::call(const_cast<Tensor&>(*this), k, dims);
3717}
3718
3719// aten::_nested_tensor_size(Tensor self) -> Tensor
3720inline at::Tensor Tensor::_nested_tensor_size() const {
3721 return at::_ops::_nested_tensor_size::call(const_cast<Tensor&>(*this));
3722}
3723
3724// aten::_nested_tensor_strides(Tensor self) -> Tensor
3725inline at::Tensor Tensor::_nested_tensor_strides() const {
3726 return at::_ops::_nested_tensor_strides::call(const_cast<Tensor&>(*this));
3727}
3728
3729// aten::_nested_tensor_offsets(Tensor self) -> int[]
3730inline ::std::vector<int64_t> Tensor::_nested_tensor_offsets() const {
3731 return at::_ops::_nested_tensor_offsets::call(const_cast<Tensor&>(*this));
3732}
3733
3734// aten::trunc(Tensor self) -> Tensor
3735inline at::Tensor Tensor::trunc() const {
3736 return at::_ops::trunc::call(const_cast<Tensor&>(*this));
3737}
3738
3739// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
3740inline at::Tensor & Tensor::trunc_() const {
3741 return at::_ops::trunc_::call(const_cast<Tensor&>(*this));
3742}
3743
3744// aten::fix(Tensor self) -> Tensor
3745inline at::Tensor Tensor::fix() const {
3746 return at::_ops::fix::call(const_cast<Tensor&>(*this));
3747}
3748
3749// aten::fix_(Tensor(a!) self) -> Tensor(a!)
3750inline at::Tensor & Tensor::fix_() const {
3751 return at::_ops::fix_::call(const_cast<Tensor&>(*this));
3752}
3753
3754// aten::type_as(Tensor self, Tensor other) -> Tensor
3755inline at::Tensor Tensor::type_as(const at::Tensor & other) const {
3756 return at::_ops::type_as::call(const_cast<Tensor&>(*this), other);
3757}
3758
3759// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
3760inline at::Tensor Tensor::unsqueeze(int64_t dim) const {
3761 return at::_ops::unsqueeze::call(const_cast<Tensor&>(*this), dim);
3762}
3763
3764// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
3765inline at::Tensor & Tensor::unsqueeze_(int64_t dim) const {
3766 return at::_ops::unsqueeze_::call(const_cast<Tensor&>(*this), dim);
3767}
3768
3769// aten::var(Tensor self, bool unbiased=True) -> Tensor
3770inline at::Tensor Tensor::var(bool unbiased) const {
3771 return at::_ops::var::call(const_cast<Tensor&>(*this), unbiased);
3772}
3773
3774// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
3775inline at::Tensor Tensor::var(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) const {
3776 return at::_ops::var_dim::call(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
3777}
3778
3779// aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
3780inline at::Tensor Tensor::var(at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) const {
3781 return at::_ops::var_correction::call(const_cast<Tensor&>(*this), dim, correction, keepdim);
3782}
3783
3784// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
3785inline at::Tensor Tensor::var(at::DimnameList dim, bool unbiased, bool keepdim) const {
3786 return at::_ops::var_names_dim::call(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
3787}
3788
3789// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
3790inline at::Tensor Tensor::var(at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) const {
3791 return at::_ops::var_correction_names::call(const_cast<Tensor&>(*this), dim, correction, keepdim);
3792}
3793
3794// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
3795inline at::Tensor Tensor::view_as(const at::Tensor & other) const {
3796 return at::_ops::view_as::call(const_cast<Tensor&>(*this), other);
3797}
3798
3799// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
3800inline at::Tensor Tensor::where(const at::Tensor & condition, const at::Tensor & other) const {
3801 return at::_ops::where_self::call(condition, const_cast<Tensor&>(*this), other);
3802}
3803
3804// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
3805inline at::Tensor Tensor::where(const at::Tensor & condition, const at::Scalar & other) const {
3806 return at::_ops::where_ScalarOther::call(condition, const_cast<Tensor&>(*this), other);
3807}
3808
3809// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
3810inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::ScalarType dtype) const {
3811 return at::_ops::norm_ScalarOpt_dtype::call(const_cast<Tensor&>(*this), p, dtype);
3812}
3813
3814// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
3815inline at::Tensor Tensor::norm(const at::Scalar & p) const {
3816 return at::_ops::norm_Scalar::call(const_cast<Tensor&>(*this), p);
3817}
3818
3819// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
3820inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) const {
3821 return at::_ops::norm_ScalarOpt_dim_dtype::call(const_cast<Tensor&>(*this), p, dim, keepdim, dtype);
3822}
3823
3824// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
3825inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) const {
3826 return at::_ops::norm_ScalarOpt_dim::call(const_cast<Tensor&>(*this), p, dim, keepdim);
3827}
3828
3829// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
3830inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) const {
3831 return at::_ops::norm_names_ScalarOpt_dim_dtype::call(const_cast<Tensor&>(*this), p, dim, keepdim, dtype);
3832}
3833
3834// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
3835inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) const {
3836 return at::_ops::norm_names_ScalarOpt_dim::call(const_cast<Tensor&>(*this), p, dim, keepdim);
3837}
3838
3839// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
3840inline ::std::tuple<at::Tensor,at::Tensor> Tensor::frexp() const {
3841 return at::_ops::frexp_Tensor::call(const_cast<Tensor&>(*this));
3842}
3843
3844// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
3845inline at::Tensor Tensor::clone(c10::optional<at::MemoryFormat> memory_format) const {
3846 return at::_ops::clone::call(const_cast<Tensor&>(*this), memory_format);
3847}
3848
3849// aten::positive(Tensor(a) self) -> Tensor(a)
3850inline at::Tensor Tensor::positive() const {
3851 return at::_ops::positive::call(const_cast<Tensor&>(*this));
3852}
3853
3854// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
3855inline const at::Tensor & Tensor::resize_as_(const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) const {
3856 return at::_ops::resize_as_::call(const_cast<Tensor&>(*this), the_template, memory_format);
3857}
3858
3859// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
3860inline const at::Tensor & Tensor::resize_as_sparse_(const at::Tensor & the_template) const {
3861 return at::_ops::resize_as_sparse_::call(const_cast<Tensor&>(*this), the_template);
3862}
3863
3864// aten::zero_(Tensor(a!) self) -> Tensor(a!)
3865inline at::Tensor & Tensor::zero_() const {
3866 return at::_ops::zero_::call(const_cast<Tensor&>(*this));
3867}
3868
3869// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
3870inline at::Tensor Tensor::sub(const at::Tensor & other, const at::Scalar & alpha) const {
3871 return at::_ops::sub_Tensor::call(const_cast<Tensor&>(*this), other, alpha);
3872}
3873
3874// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
3875inline at::Tensor & Tensor::sub_(const at::Tensor & other, const at::Scalar & alpha) const {
3876 return at::_ops::sub__Tensor::call(const_cast<Tensor&>(*this), other, alpha);
3877}
3878
3879// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
3880inline at::Tensor Tensor::sub(const at::Scalar & other, const at::Scalar & alpha) const {
3881 return at::_ops::sub_Scalar::call(const_cast<Tensor&>(*this), other, alpha);
3882}
3883
3884// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
3885inline at::Tensor & Tensor::sub_(const at::Scalar & other, const at::Scalar & alpha) const {
3886 return at::_ops::sub__Scalar::call(const_cast<Tensor&>(*this), other, alpha);
3887}
3888
3889// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
3890inline at::Tensor Tensor::subtract(const at::Tensor & other, const at::Scalar & alpha) const {
3891 return at::_ops::subtract_Tensor::call(const_cast<Tensor&>(*this), other, alpha);
3892}
3893
3894// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
3895inline at::Tensor & Tensor::subtract_(const at::Tensor & other, const at::Scalar & alpha) const {
3896 return at::_ops::subtract__Tensor::call(const_cast<Tensor&>(*this), other, alpha);
3897}
3898
3899// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
3900inline at::Tensor Tensor::subtract(const at::Scalar & other, const at::Scalar & alpha) const {
3901 return at::_ops::subtract_Scalar::call(const_cast<Tensor&>(*this), other, alpha);
3902}
3903
3904// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
3905inline at::Tensor & Tensor::subtract_(const at::Scalar & other, const at::Scalar & alpha) const {
3906 return at::_ops::subtract__Scalar::call(const_cast<Tensor&>(*this), other, alpha);
3907}
3908
3909// aten::heaviside(Tensor self, Tensor values) -> Tensor
3910inline at::Tensor Tensor::heaviside(const at::Tensor & values) const {
3911 return at::_ops::heaviside::call(const_cast<Tensor&>(*this), values);
3912}
3913
3914// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
3915inline at::Tensor & Tensor::heaviside_(const at::Tensor & values) const {
3916 return at::_ops::heaviside_::call(const_cast<Tensor&>(*this), values);
3917}
3918
3919// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
3920inline at::Tensor Tensor::addmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const {
3921 return at::_ops::addmm::call(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
3922}
3923
3924// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
3925inline at::Tensor & Tensor::addmm_(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const {
3926 return at::_ops::addmm_::call(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
3927}
3928
3929// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
3930inline at::Tensor Tensor::_addmm_activation(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) const {
3931 return at::_ops::_addmm_activation::call(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha, use_gelu);
3932}
3933
3934// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
3935inline const at::Tensor & Tensor::sparse_resize_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const {
3936 return at::_ops::sparse_resize_::call(const_cast<Tensor&>(*this), size, sparse_dim, dense_dim);
3937}
3938
3939// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
3940inline const at::Tensor & Tensor::sparse_resize_and_clear_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const {
3941 return at::_ops::sparse_resize_and_clear_::call(const_cast<Tensor&>(*this), size, sparse_dim, dense_dim);
3942}
3943
3944// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
3945inline at::Tensor Tensor::sparse_mask(const at::Tensor & mask) const {
3946 return at::_ops::sparse_mask::call(const_cast<Tensor&>(*this), mask);
3947}
3948
3949// aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
3950inline at::Tensor Tensor::to_dense(c10::optional<at::ScalarType> dtype) const {
3951 return at::_ops::to_dense::call(const_cast<Tensor&>(*this), dtype);
3952}
3953
3954// aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
3955inline at::Tensor Tensor::_to_dense(c10::optional<at::ScalarType> dtype) const {
3956 return at::_ops::_to_dense::call(const_cast<Tensor&>(*this), dtype);
3957}
3958
3959// aten::sparse_dim(Tensor self) -> int
3960inline int64_t Tensor::sparse_dim() const {
3961 return at::_ops::sparse_dim::call(const_cast<Tensor&>(*this));
3962}
3963
3964// aten::_dimI(Tensor self) -> int
3965inline int64_t Tensor::_dimI() const {
3966 return at::_ops::_dimI::call(const_cast<Tensor&>(*this));
3967}
3968
3969// aten::dense_dim(Tensor self) -> int
3970inline int64_t Tensor::dense_dim() const {
3971 return at::_ops::dense_dim::call(const_cast<Tensor&>(*this));
3972}
3973
3974// aten::_dimV(Tensor self) -> int
3975inline int64_t Tensor::_dimV() const {
3976 return at::_ops::_dimV::call(const_cast<Tensor&>(*this));
3977}
3978
3979// aten::_nnz(Tensor self) -> int
3980inline int64_t Tensor::_nnz() const {
3981 return at::_ops::_nnz::call(const_cast<Tensor&>(*this));
3982}
3983
3984// aten::coalesce(Tensor(a) self) -> Tensor(a)
3985inline at::Tensor Tensor::coalesce() const {
3986 return at::_ops::coalesce::call(const_cast<Tensor&>(*this));
3987}
3988
3989// aten::is_coalesced(Tensor self) -> bool
3990inline bool Tensor::is_coalesced() const {
3991 return at::_ops::is_coalesced::call(const_cast<Tensor&>(*this));
3992}
3993
3994// aten::_indices(Tensor(a) self) -> Tensor(a)
3995inline at::Tensor Tensor::_indices() const {
3996 return at::_ops::_indices::call(const_cast<Tensor&>(*this));
3997}
3998
3999// aten::_values(Tensor(a) self) -> Tensor(a)
4000inline at::Tensor Tensor::_values() const {
4001 return at::_ops::_values::call(const_cast<Tensor&>(*this));
4002}
4003
4004// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
4005inline at::Tensor & Tensor::_coalesced_(bool coalesced) const {
4006 return at::_ops::_coalesced_::call(const_cast<Tensor&>(*this), coalesced);
4007}
4008
4009// aten::indices(Tensor(a) self) -> Tensor(a)
4010inline at::Tensor Tensor::indices() const {
4011 return at::_ops::indices::call(const_cast<Tensor&>(*this));
4012}
4013
4014// aten::values(Tensor(a) self) -> Tensor(a)
4015inline at::Tensor Tensor::values() const {
4016 return at::_ops::values::call(const_cast<Tensor&>(*this));
4017}
4018
4019// aten::crow_indices(Tensor(a) self) -> Tensor(a)
4020inline at::Tensor Tensor::crow_indices() const {
4021 return at::_ops::crow_indices::call(const_cast<Tensor&>(*this));
4022}
4023
4024// aten::col_indices(Tensor(a) self) -> Tensor(a)
4025inline at::Tensor Tensor::col_indices() const {
4026 return at::_ops::col_indices::call(const_cast<Tensor&>(*this));
4027}
4028
4029// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
4030inline at::Tensor Tensor::ccol_indices() const {
4031 return at::_ops::ccol_indices::call(const_cast<Tensor&>(*this));
4032}
4033
4034// aten::row_indices(Tensor(a) self) -> Tensor(a)
4035inline at::Tensor Tensor::row_indices() const {
4036 return at::_ops::row_indices::call(const_cast<Tensor&>(*this));
4037}
4038
4039// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
4040inline ::std::vector<at::Tensor> Tensor::unbind(int64_t dim) const {
4041 return at::_ops::unbind_int::call(const_cast<Tensor&>(*this), dim);
4042}
4043
4044// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
4045inline ::std::vector<at::Tensor> Tensor::unbind(at::Dimname dim) const {
4046 return at::_ops::unbind_Dimname::call(const_cast<Tensor&>(*this), dim);
4047}
4048
4049// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
4050inline at::Tensor Tensor::to_sparse(int64_t sparse_dim) const {
4051 return at::_ops::to_sparse_sparse_dim::call(const_cast<Tensor&>(*this), sparse_dim);
4052}
4053
4054// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
4055inline at::Tensor Tensor::to_sparse(c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) const {
4056 return at::_ops::to_sparse::call(const_cast<Tensor&>(*this), layout, blocksize, dense_dim);
4057}
4058
4059// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
4060inline at::Tensor Tensor::to_sparse_csr(c10::optional<int64_t> dense_dim) const {
4061 return at::_ops::to_sparse_csr::call(const_cast<Tensor&>(*this), dense_dim);
4062}
4063
4064// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
4065inline at::Tensor Tensor::to_sparse_csc(c10::optional<int64_t> dense_dim) const {
4066 return at::_ops::to_sparse_csc::call(const_cast<Tensor&>(*this), dense_dim);
4067}
4068
4069// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
4070inline at::Tensor Tensor::to_sparse_bsr(at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) const {
4071 return at::_ops::to_sparse_bsr::call(const_cast<Tensor&>(*this), blocksize, dense_dim);
4072}
4073
4074// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
4075inline at::Tensor Tensor::to_sparse_bsc(at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) const {
4076 return at::_ops::to_sparse_bsc::call(const_cast<Tensor&>(*this), blocksize, dense_dim);
4077}
4078
4079// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
4080inline at::Tensor Tensor::to_mkldnn(c10::optional<at::ScalarType> dtype) const {
4081 return at::_ops::to_mkldnn::call(const_cast<Tensor&>(*this), dtype);
4082}
4083
4084// aten::dequantize.self(Tensor self) -> Tensor
4085inline at::Tensor Tensor::dequantize() const {
4086 return at::_ops::dequantize_self::call(const_cast<Tensor&>(*this));
4087}
4088
4089// aten::q_scale(Tensor self) -> float
4090inline double Tensor::q_scale() const {
4091 return at::_ops::q_scale::call(const_cast<Tensor&>(*this));
4092}
4093
4094// aten::q_zero_point(Tensor self) -> int
4095inline int64_t Tensor::q_zero_point() const {
4096 return at::_ops::q_zero_point::call(const_cast<Tensor&>(*this));
4097}
4098
4099// aten::q_per_channel_scales(Tensor self) -> Tensor
4100inline at::Tensor Tensor::q_per_channel_scales() const {
4101 return at::_ops::q_per_channel_scales::call(const_cast<Tensor&>(*this));
4102}
4103
4104// aten::q_per_channel_zero_points(Tensor self) -> Tensor
4105inline at::Tensor Tensor::q_per_channel_zero_points() const {
4106 return at::_ops::q_per_channel_zero_points::call(const_cast<Tensor&>(*this));
4107}
4108
4109// aten::q_per_channel_axis(Tensor self) -> int
4110inline int64_t Tensor::q_per_channel_axis() const {
4111 return at::_ops::q_per_channel_axis::call(const_cast<Tensor&>(*this));
4112}
4113
4114// aten::int_repr(Tensor self) -> Tensor
4115inline at::Tensor Tensor::int_repr() const {
4116 return at::_ops::int_repr::call(const_cast<Tensor&>(*this));
4117}
4118
4119// aten::qscheme(Tensor self) -> QScheme
4120inline at::QScheme Tensor::qscheme() const {
4121 return at::_ops::qscheme::call(const_cast<Tensor&>(*this));
4122}
4123
4124// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
4125inline at::Tensor Tensor::_autocast_to_reduced_precision(bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) const {
4126 return at::_ops::_autocast_to_reduced_precision::call(const_cast<Tensor&>(*this), cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
4127}
4128
4129// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
4130inline at::Tensor Tensor::_autocast_to_full_precision(bool cuda_enabled, bool cpu_enabled) const {
4131 return at::_ops::_autocast_to_full_precision::call(const_cast<Tensor&>(*this), cuda_enabled, cpu_enabled);
4132}
4133
4134// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
4135inline at::Tensor Tensor::to(at::TensorOptions options, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
4136 return at::_ops::to_dtype_layout::call(const_cast<Tensor&>(*this), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, copy, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
4137}
4138
4139// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
4140inline at::Tensor Tensor::to(c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
4141 return at::_ops::to_dtype_layout::call(const_cast<Tensor&>(*this), dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
4142}
4143
4144// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
4145inline at::Tensor Tensor::to(at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
4146 return at::_ops::to_device::call(const_cast<Tensor&>(*this), device, dtype, non_blocking, copy, memory_format);
4147}
4148
4149// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
4150inline at::Tensor Tensor::to(at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
4151 return at::_ops::to_dtype::call(const_cast<Tensor&>(*this), dtype, non_blocking, copy, memory_format);
4152}
4153
4154// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
4155inline at::Tensor Tensor::to(const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
4156 return at::_ops::to_other::call(const_cast<Tensor&>(*this), other, non_blocking, copy, memory_format);
4157}
4158
4159// aten::item(Tensor self) -> Scalar
4160inline at::Scalar Tensor::item() const {
4161 return at::_ops::item::call(const_cast<Tensor&>(*this));
4162}
4163
4164// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
4165inline at::Tensor & Tensor::set_(at::Storage source) const {
4166 return at::_ops::set__source_Storage::call(const_cast<Tensor&>(*this), source);
4167}
4168
4169// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
4170inline at::Tensor & Tensor::set_(at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) const {
4171 return at::_ops::set__source_Storage_storage_offset::call(const_cast<Tensor&>(*this), source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
4172}
4173
4174// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
4175inline at::Tensor & Tensor::set__symint(at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const {
4176 return at::_ops::set__source_Storage_storage_offset::call(const_cast<Tensor&>(*this), source, storage_offset, size, stride);
4177}
4178
4179// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
4180inline at::Tensor & Tensor::set_(const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) const {
4181 return at::_ops::set__source_Tensor_storage_offset::call(const_cast<Tensor&>(*this), source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
4182}
4183
4184// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
4185inline at::Tensor & Tensor::set__symint(const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const {
4186 return at::_ops::set__source_Tensor_storage_offset::call(const_cast<Tensor&>(*this), source, storage_offset, size, stride);
4187}
4188
4189// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
4190inline at::Tensor & Tensor::set_(const at::Tensor & source) const {
4191 return at::_ops::set__source_Tensor::call(const_cast<Tensor&>(*this), source);
4192}
4193
4194// aten::set_(Tensor(a!) self) -> Tensor(a!)
4195inline at::Tensor & Tensor::set_() const {
4196 return at::_ops::set_::call(const_cast<Tensor&>(*this));
4197}
4198
4199// aten::is_set_to(Tensor self, Tensor tensor) -> bool
4200inline bool Tensor::is_set_to(const at::Tensor & tensor) const {
4201 return at::_ops::is_set_to::call(const_cast<Tensor&>(*this), tensor);
4202}
4203
4204// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
4205inline at::Tensor & Tensor::masked_fill_(const at::Tensor & mask, const at::Scalar & value) const {
4206 return at::_ops::masked_fill__Scalar::call(const_cast<Tensor&>(*this), mask, value);
4207}
4208
4209// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
4210inline at::Tensor Tensor::masked_fill(const at::Tensor & mask, const at::Scalar & value) const {
4211 return at::_ops::masked_fill_Scalar::call(const_cast<Tensor&>(*this), mask, value);
4212}
4213
4214// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
4215inline at::Tensor & Tensor::masked_fill_(const at::Tensor & mask, const at::Tensor & value) const {
4216 return at::_ops::masked_fill__Tensor::call(const_cast<Tensor&>(*this), mask, value);
4217}
4218
4219// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
4220inline at::Tensor Tensor::masked_fill(const at::Tensor & mask, const at::Tensor & value) const {
4221 return at::_ops::masked_fill_Tensor::call(const_cast<Tensor&>(*this), mask, value);
4222}
4223
4224// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
4225inline at::Tensor & Tensor::masked_scatter_(const at::Tensor & mask, const at::Tensor & source) const {
4226 return at::_ops::masked_scatter_::call(const_cast<Tensor&>(*this), mask, source);
4227}
4228
4229// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
4230inline at::Tensor Tensor::masked_scatter(const at::Tensor & mask, const at::Tensor & source) const {
4231 return at::_ops::masked_scatter::call(const_cast<Tensor&>(*this), mask, source);
4232}
4233
4234// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
4235inline at::Tensor Tensor::view(at::IntArrayRef size) const {
4236 return at::_ops::view::call(const_cast<Tensor&>(*this), c10::fromIntArrayRefSlow(size));
4237}
4238
4239// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
4240inline at::Tensor Tensor::view_symint(c10::SymIntArrayRef size) const {
4241 return at::_ops::view::call(const_cast<Tensor&>(*this), size);
4242}
4243
4244// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
4245inline at::Tensor Tensor::view(at::ScalarType dtype) const {
4246 return at::_ops::view_dtype::call(const_cast<Tensor&>(*this), dtype);
4247}
4248
4249// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
4250inline at::Tensor & Tensor::put_(const at::Tensor & index, const at::Tensor & source, bool accumulate) const {
4251 return at::_ops::put_::call(const_cast<Tensor&>(*this), index, source, accumulate);
4252}
4253
4254// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
4255inline at::Tensor Tensor::put(const at::Tensor & index, const at::Tensor & source, bool accumulate) const {
4256 return at::_ops::put::call(const_cast<Tensor&>(*this), index, source, accumulate);
4257}
4258
4259// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
4260inline at::Tensor & Tensor::index_add_(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const {
4261 return at::_ops::index_add_::call(const_cast<Tensor&>(*this), dim, index, source, alpha);
4262}
4263
4264// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
4265inline at::Tensor Tensor::index_add(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const {
4266 return at::_ops::index_add::call(const_cast<Tensor&>(*this), dim, index, source, alpha);
4267}
4268
4269// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
4270inline at::Tensor Tensor::index_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const {
4271 return at::_ops::index_add_dimname::call(const_cast<Tensor&>(*this), dim, index, source, alpha);
4272}
4273
4274// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
4275inline at::Tensor & Tensor::index_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) const {
4276 return at::_ops::index_reduce_::call(const_cast<Tensor&>(*this), dim, index, source, reduce, include_self);
4277}
4278
4279// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
4280inline at::Tensor Tensor::index_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) const {
4281 return at::_ops::index_reduce::call(const_cast<Tensor&>(*this), dim, index, source, reduce, include_self);
4282}
4283
4284// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
4285inline at::Tensor & Tensor::index_fill_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const {
4286 return at::_ops::index_fill__int_Scalar::call(const_cast<Tensor&>(*this), dim, index, value);
4287}
4288
4289// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
4290inline at::Tensor Tensor::index_fill(int64_t dim, const at::Tensor & index, const at::Scalar & value) const {
4291 return at::_ops::index_fill_int_Scalar::call(const_cast<Tensor&>(*this), dim, index, value);
4292}
4293
4294// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
4295inline at::Tensor & Tensor::index_fill_(int64_t dim, const at::Tensor & index, const at::Tensor & value) const {
4296 return at::_ops::index_fill__int_Tensor::call(const_cast<Tensor&>(*this), dim, index, value);
4297}
4298
4299// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
4300inline at::Tensor Tensor::index_fill(int64_t dim, const at::Tensor & index, const at::Tensor & value) const {
4301 return at::_ops::index_fill_int_Tensor::call(const_cast<Tensor&>(*this), dim, index, value);
4302}
4303
4304// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
4305inline at::Tensor & Tensor::index_fill_(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const {
4306 return at::_ops::index_fill__Dimname_Scalar::call(const_cast<Tensor&>(*this), dim, index, value);
4307}
4308
4309// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
4310inline at::Tensor & Tensor::index_fill_(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const {
4311 return at::_ops::index_fill__Dimname_Tensor::call(const_cast<Tensor&>(*this), dim, index, value);
4312}
4313
4314// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
4315inline at::Tensor Tensor::index_fill(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const {
4316 return at::_ops::index_fill_Dimname_Scalar::call(const_cast<Tensor&>(*this), dim, index, value);
4317}
4318
4319// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
4320inline at::Tensor Tensor::index_fill(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const {
4321 return at::_ops::index_fill_Dimname_Tensor::call(const_cast<Tensor&>(*this), dim, index, value);
4322}
4323
4324// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
4325inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
4326 return at::_ops::scatter_src::call(const_cast<Tensor&>(*this), dim, index, src);
4327}
4328
4329// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
4330inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
4331 return at::_ops::scatter__src::call(const_cast<Tensor&>(*this), dim, index, src);
4332}
4333
4334// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
4335inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value) const {
4336 return at::_ops::scatter_value::call(const_cast<Tensor&>(*this), dim, index, value);
4337}
4338
4339// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
4340inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const {
4341 return at::_ops::scatter__value::call(const_cast<Tensor&>(*this), dim, index, value);
4342}
4343
4344// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
4345inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const {
4346 return at::_ops::scatter_reduce::call(const_cast<Tensor&>(*this), dim, index, src, reduce);
4347}
4348
4349// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
4350inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const {
4351 return at::_ops::scatter__reduce::call(const_cast<Tensor&>(*this), dim, index, src, reduce);
4352}
4353
4354// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
4355inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const {
4356 return at::_ops::scatter_value_reduce::call(const_cast<Tensor&>(*this), dim, index, value, reduce);
4357}
4358
4359// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
4360inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const {
4361 return at::_ops::scatter__value_reduce::call(const_cast<Tensor&>(*this), dim, index, value, reduce);
4362}
4363
4364// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
4365inline at::Tensor Tensor::scatter(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const {
4366 return at::_ops::scatter_dimname_src::call(const_cast<Tensor&>(*this), dim, index, src);
4367}
4368
4369// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
4370inline at::Tensor Tensor::scatter(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const {
4371 return at::_ops::scatter_dimname_value::call(const_cast<Tensor&>(*this), dim, index, value);
4372}
4373
4374// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
4375inline at::Tensor Tensor::scatter_add(int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
4376 return at::_ops::scatter_add::call(const_cast<Tensor&>(*this), dim, index, src);
4377}
4378
4379// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
4380inline at::Tensor & Tensor::scatter_add_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
4381 return at::_ops::scatter_add_::call(const_cast<Tensor&>(*this), dim, index, src);
4382}
4383
4384// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
4385inline at::Tensor Tensor::scatter_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const {
4386 return at::_ops::scatter_add_dimname::call(const_cast<Tensor&>(*this), dim, index, src);
4387}
4388
4389// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
4390inline at::Tensor Tensor::scatter_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) const {
4391 return at::_ops::scatter_reduce_two::call(const_cast<Tensor&>(*this), dim, index, src, reduce, include_self);
4392}
4393
4394// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
4395inline at::Tensor & Tensor::scatter_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) const {
4396 return at::_ops::scatter_reduce__two::call(const_cast<Tensor&>(*this), dim, index, src, reduce, include_self);
4397}
4398
4399// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4400inline at::Tensor & Tensor::eq_(const at::Scalar & other) const {
4401 return at::_ops::eq__Scalar::call(const_cast<Tensor&>(*this), other);
4402}
4403
4404// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4405inline at::Tensor & Tensor::eq_(const at::Tensor & other) const {
4406 return at::_ops::eq__Tensor::call(const_cast<Tensor&>(*this), other);
4407}
4408
4409// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
4410inline at::Tensor Tensor::bitwise_and(const at::Scalar & other) const {
4411 return at::_ops::bitwise_and_Scalar::call(const_cast<Tensor&>(*this), other);
4412}
4413
4414// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
4415inline at::Tensor Tensor::bitwise_and(const at::Tensor & other) const {
4416 return at::_ops::bitwise_and_Tensor::call(const_cast<Tensor&>(*this), other);
4417}
4418
4419// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4420inline at::Tensor & Tensor::bitwise_and_(const at::Scalar & other) const {
4421 return at::_ops::bitwise_and__Scalar::call(const_cast<Tensor&>(*this), other);
4422}
4423
4424// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4425inline at::Tensor & Tensor::bitwise_and_(const at::Tensor & other) const {
4426 return at::_ops::bitwise_and__Tensor::call(const_cast<Tensor&>(*this), other);
4427}
4428
4429// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
4430inline at::Tensor Tensor::__and__(const at::Scalar & other) const {
4431 return at::_ops::__and___Scalar::call(const_cast<Tensor&>(*this), other);
4432}
4433
4434// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
4435inline at::Tensor Tensor::__and__(const at::Tensor & other) const {
4436 return at::_ops::__and___Tensor::call(const_cast<Tensor&>(*this), other);
4437}
4438
4439// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4440inline at::Tensor & Tensor::__iand__(const at::Scalar & other) const {
4441 return at::_ops::__iand___Scalar::call(const_cast<Tensor&>(*this), other);
4442}
4443
4444// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4445inline at::Tensor & Tensor::__iand__(const at::Tensor & other) const {
4446 return at::_ops::__iand___Tensor::call(const_cast<Tensor&>(*this), other);
4447}
4448
4449// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
4450inline at::Tensor Tensor::bitwise_or(const at::Scalar & other) const {
4451 return at::_ops::bitwise_or_Scalar::call(const_cast<Tensor&>(*this), other);
4452}
4453
4454// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
4455inline at::Tensor Tensor::bitwise_or(const at::Tensor & other) const {
4456 return at::_ops::bitwise_or_Tensor::call(const_cast<Tensor&>(*this), other);
4457}
4458
4459// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4460inline at::Tensor & Tensor::bitwise_or_(const at::Scalar & other) const {
4461 return at::_ops::bitwise_or__Scalar::call(const_cast<Tensor&>(*this), other);
4462}
4463
4464// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4465inline at::Tensor & Tensor::bitwise_or_(const at::Tensor & other) const {
4466 return at::_ops::bitwise_or__Tensor::call(const_cast<Tensor&>(*this), other);
4467}
4468
4469// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
4470inline at::Tensor Tensor::__or__(const at::Scalar & other) const {
4471 return at::_ops::__or___Scalar::call(const_cast<Tensor&>(*this), other);
4472}
4473
4474// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
4475inline at::Tensor Tensor::__or__(const at::Tensor & other) const {
4476 return at::_ops::__or___Tensor::call(const_cast<Tensor&>(*this), other);
4477}
4478
4479// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4480inline at::Tensor & Tensor::__ior__(const at::Scalar & other) const {
4481 return at::_ops::__ior___Scalar::call(const_cast<Tensor&>(*this), other);
4482}
4483
4484// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4485inline at::Tensor & Tensor::__ior__(const at::Tensor & other) const {
4486 return at::_ops::__ior___Tensor::call(const_cast<Tensor&>(*this), other);
4487}
4488
4489// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
4490inline at::Tensor Tensor::bitwise_xor(const at::Scalar & other) const {
4491 return at::_ops::bitwise_xor_Scalar::call(const_cast<Tensor&>(*this), other);
4492}
4493
4494// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
4495inline at::Tensor Tensor::bitwise_xor(const at::Tensor & other) const {
4496 return at::_ops::bitwise_xor_Tensor::call(const_cast<Tensor&>(*this), other);
4497}
4498
4499// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4500inline at::Tensor & Tensor::bitwise_xor_(const at::Scalar & other) const {
4501 return at::_ops::bitwise_xor__Scalar::call(const_cast<Tensor&>(*this), other);
4502}
4503
4504// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4505inline at::Tensor & Tensor::bitwise_xor_(const at::Tensor & other) const {
4506 return at::_ops::bitwise_xor__Tensor::call(const_cast<Tensor&>(*this), other);
4507}
4508
4509// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
4510inline at::Tensor Tensor::__xor__(const at::Scalar & other) const {
4511 return at::_ops::__xor___Scalar::call(const_cast<Tensor&>(*this), other);
4512}
4513
4514// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
4515inline at::Tensor Tensor::__xor__(const at::Tensor & other) const {
4516 return at::_ops::__xor___Tensor::call(const_cast<Tensor&>(*this), other);
4517}
4518
4519// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4520inline at::Tensor & Tensor::__ixor__(const at::Scalar & other) const {
4521 return at::_ops::__ixor___Scalar::call(const_cast<Tensor&>(*this), other);
4522}
4523
4524// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4525inline at::Tensor & Tensor::__ixor__(const at::Tensor & other) const {
4526 return at::_ops::__ixor___Tensor::call(const_cast<Tensor&>(*this), other);
4527}
4528
4529// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
4530inline at::Tensor Tensor::__lshift__(const at::Scalar & other) const {
4531 return at::_ops::__lshift___Scalar::call(const_cast<Tensor&>(*this), other);
4532}
4533
4534// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
4535inline at::Tensor Tensor::__lshift__(const at::Tensor & other) const {
4536 return at::_ops::__lshift___Tensor::call(const_cast<Tensor&>(*this), other);
4537}
4538
4539// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4540inline at::Tensor & Tensor::__ilshift__(const at::Scalar & other) const {
4541 return at::_ops::__ilshift___Scalar::call(const_cast<Tensor&>(*this), other);
4542}
4543
4544// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4545inline at::Tensor & Tensor::__ilshift__(const at::Tensor & other) const {
4546 return at::_ops::__ilshift___Tensor::call(const_cast<Tensor&>(*this), other);
4547}
4548
4549// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
4550inline at::Tensor Tensor::bitwise_left_shift(const at::Tensor & other) const {
4551 return at::_ops::bitwise_left_shift_Tensor::call(const_cast<Tensor&>(*this), other);
4552}
4553
4554// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4555inline at::Tensor & Tensor::bitwise_left_shift_(const at::Tensor & other) const {
4556 return at::_ops::bitwise_left_shift__Tensor::call(const_cast<Tensor&>(*this), other);
4557}
4558
4559// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
4560inline at::Tensor Tensor::bitwise_left_shift(const at::Scalar & other) const {
4561 return at::_ops::bitwise_left_shift_Tensor_Scalar::call(const_cast<Tensor&>(*this), other);
4562}
4563
4564// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4565inline at::Tensor & Tensor::bitwise_left_shift_(const at::Scalar & other) const {
4566 return at::_ops::bitwise_left_shift__Tensor_Scalar::call(const_cast<Tensor&>(*this), other);
4567}
4568
4569// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
4570inline at::Tensor Tensor::__rshift__(const at::Scalar & other) const {
4571 return at::_ops::__rshift___Scalar::call(const_cast<Tensor&>(*this), other);
4572}
4573
4574// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
4575inline at::Tensor Tensor::__rshift__(const at::Tensor & other) const {
4576 return at::_ops::__rshift___Tensor::call(const_cast<Tensor&>(*this), other);
4577}
4578
4579// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4580inline at::Tensor & Tensor::__irshift__(const at::Scalar & other) const {
4581 return at::_ops::__irshift___Scalar::call(const_cast<Tensor&>(*this), other);
4582}
4583
4584// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4585inline at::Tensor & Tensor::__irshift__(const at::Tensor & other) const {
4586 return at::_ops::__irshift___Tensor::call(const_cast<Tensor&>(*this), other);
4587}
4588
4589// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
4590inline at::Tensor Tensor::bitwise_right_shift(const at::Tensor & other) const {
4591 return at::_ops::bitwise_right_shift_Tensor::call(const_cast<Tensor&>(*this), other);
4592}
4593
4594// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4595inline at::Tensor & Tensor::bitwise_right_shift_(const at::Tensor & other) const {
4596 return at::_ops::bitwise_right_shift__Tensor::call(const_cast<Tensor&>(*this), other);
4597}
4598
4599// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
4600inline at::Tensor Tensor::bitwise_right_shift(const at::Scalar & other) const {
4601 return at::_ops::bitwise_right_shift_Tensor_Scalar::call(const_cast<Tensor&>(*this), other);
4602}
4603
4604// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4605inline at::Tensor & Tensor::bitwise_right_shift_(const at::Scalar & other) const {
4606 return at::_ops::bitwise_right_shift__Tensor_Scalar::call(const_cast<Tensor&>(*this), other);
4607}
4608
4609// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
4610inline at::Tensor & Tensor::tril_(int64_t diagonal) const {
4611 return at::_ops::tril_::call(const_cast<Tensor&>(*this), diagonal);
4612}
4613
4614// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
4615inline at::Tensor & Tensor::triu_(int64_t diagonal) const {
4616 return at::_ops::triu_::call(const_cast<Tensor&>(*this), diagonal);
4617}
4618
4619// aten::digamma_(Tensor(a!) self) -> Tensor(a!)
4620inline at::Tensor & Tensor::digamma_() const {
4621 return at::_ops::digamma_::call(const_cast<Tensor&>(*this));
4622}
4623
4624// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
4625inline at::Tensor & Tensor::lerp_(const at::Tensor & end, const at::Scalar & weight) const {
4626 return at::_ops::lerp__Scalar::call(const_cast<Tensor&>(*this), end, weight);
4627}
4628
4629// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
4630inline at::Tensor & Tensor::lerp_(const at::Tensor & end, const at::Tensor & weight) const {
4631 return at::_ops::lerp__Tensor::call(const_cast<Tensor&>(*this), end, weight);
4632}
4633
4634// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
4635inline at::Tensor & Tensor::addbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const {
4636 return at::_ops::addbmm_::call(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
4637}
4638
4639// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
4640inline at::Tensor Tensor::addbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const {
4641 return at::_ops::addbmm::call(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
4642}
4643
4644// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
4645inline at::Tensor & Tensor::random_(int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) const {
4646 return at::_ops::random__from::call(const_cast<Tensor&>(*this), from, to, generator);
4647}
4648
4649// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
4650inline at::Tensor & Tensor::random_(int64_t to, c10::optional<at::Generator> generator) const {
4651 return at::_ops::random__to::call(const_cast<Tensor&>(*this), to, generator);
4652}
4653
4654// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
4655inline at::Tensor & Tensor::random_(c10::optional<at::Generator> generator) const {
4656 return at::_ops::random_::call(const_cast<Tensor&>(*this), generator);
4657}
4658
4659// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
4660inline at::Tensor & Tensor::uniform_(double from, double to, c10::optional<at::Generator> generator) const {
4661 return at::_ops::uniform_::call(const_cast<Tensor&>(*this), from, to, generator);
4662}
4663
4664// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
4665inline at::Tensor & Tensor::cauchy_(double median, double sigma, c10::optional<at::Generator> generator) const {
4666 return at::_ops::cauchy_::call(const_cast<Tensor&>(*this), median, sigma, generator);
4667}
4668
4669// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
4670inline at::Tensor & Tensor::log_normal_(double mean, double std, c10::optional<at::Generator> generator) const {
4671 return at::_ops::log_normal_::call(const_cast<Tensor&>(*this), mean, std, generator);
4672}
4673
4674// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
4675inline at::Tensor & Tensor::exponential_(double lambd, c10::optional<at::Generator> generator) const {
4676 return at::_ops::exponential_::call(const_cast<Tensor&>(*this), lambd, generator);
4677}
4678
4679// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
4680inline at::Tensor & Tensor::geometric_(double p, c10::optional<at::Generator> generator) const {
4681 return at::_ops::geometric_::call(const_cast<Tensor&>(*this), p, generator);
4682}
4683
4684// aten::diag(Tensor self, int diagonal=0) -> Tensor
4685inline at::Tensor Tensor::diag(int64_t diagonal) const {
4686 return at::_ops::diag::call(const_cast<Tensor&>(*this), diagonal);
4687}
4688
4689// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
4690inline at::Tensor Tensor::cross(const at::Tensor & other, c10::optional<int64_t> dim) const {
4691 return at::_ops::cross::call(const_cast<Tensor&>(*this), other, dim);
4692}
4693
4694// aten::triu(Tensor self, int diagonal=0) -> Tensor
4695inline at::Tensor Tensor::triu(int64_t diagonal) const {
4696 return at::_ops::triu::call(const_cast<Tensor&>(*this), diagonal);
4697}
4698
4699// aten::tril(Tensor self, int diagonal=0) -> Tensor
4700inline at::Tensor Tensor::tril(int64_t diagonal) const {
4701 return at::_ops::tril::call(const_cast<Tensor&>(*this), diagonal);
4702}
4703
4704// aten::trace(Tensor self) -> Tensor
4705inline at::Tensor Tensor::trace() const {
4706 return at::_ops::trace::call(const_cast<Tensor&>(*this));
4707}
4708
4709// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
4710inline at::Tensor Tensor::ne(const at::Scalar & other) const {
4711 return at::_ops::ne_Scalar::call(const_cast<Tensor&>(*this), other);
4712}
4713
4714// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
4715inline at::Tensor Tensor::ne(const at::Tensor & other) const {
4716 return at::_ops::ne_Tensor::call(const_cast<Tensor&>(*this), other);
4717}
4718
4719// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4720inline at::Tensor & Tensor::ne_(const at::Scalar & other) const {
4721 return at::_ops::ne__Scalar::call(const_cast<Tensor&>(*this), other);
4722}
4723
4724// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4725inline at::Tensor & Tensor::ne_(const at::Tensor & other) const {
4726 return at::_ops::ne__Tensor::call(const_cast<Tensor&>(*this), other);
4727}
4728
4729// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
4730inline at::Tensor Tensor::not_equal(const at::Scalar & other) const {
4731 return at::_ops::not_equal_Scalar::call(const_cast<Tensor&>(*this), other);
4732}
4733
4734// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
4735inline at::Tensor Tensor::not_equal(const at::Tensor & other) const {
4736 return at::_ops::not_equal_Tensor::call(const_cast<Tensor&>(*this), other);
4737}
4738
4739// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4740inline at::Tensor & Tensor::not_equal_(const at::Scalar & other) const {
4741 return at::_ops::not_equal__Scalar::call(const_cast<Tensor&>(*this), other);
4742}
4743
4744// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4745inline at::Tensor & Tensor::not_equal_(const at::Tensor & other) const {
4746 return at::_ops::not_equal__Tensor::call(const_cast<Tensor&>(*this), other);
4747}
4748
4749// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
4750inline at::Tensor Tensor::eq(const at::Scalar & other) const {
4751 return at::_ops::eq_Scalar::call(const_cast<Tensor&>(*this), other);
4752}
4753
4754// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
4755inline at::Tensor Tensor::eq(const at::Tensor & other) const {
4756 return at::_ops::eq_Tensor::call(const_cast<Tensor&>(*this), other);
4757}
4758
4759// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
4760inline at::Tensor Tensor::ge(const at::Scalar & other) const {
4761 return at::_ops::ge_Scalar::call(const_cast<Tensor&>(*this), other);
4762}
4763
4764// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
4765inline at::Tensor Tensor::ge(const at::Tensor & other) const {
4766 return at::_ops::ge_Tensor::call(const_cast<Tensor&>(*this), other);
4767}
4768
4769// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4770inline at::Tensor & Tensor::ge_(const at::Scalar & other) const {
4771 return at::_ops::ge__Scalar::call(const_cast<Tensor&>(*this), other);
4772}
4773
4774// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4775inline at::Tensor & Tensor::ge_(const at::Tensor & other) const {
4776 return at::_ops::ge__Tensor::call(const_cast<Tensor&>(*this), other);
4777}
4778
4779// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
4780inline at::Tensor Tensor::greater_equal(const at::Scalar & other) const {
4781 return at::_ops::greater_equal_Scalar::call(const_cast<Tensor&>(*this), other);
4782}
4783
4784// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
4785inline at::Tensor Tensor::greater_equal(const at::Tensor & other) const {
4786 return at::_ops::greater_equal_Tensor::call(const_cast<Tensor&>(*this), other);
4787}
4788
4789// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4790inline at::Tensor & Tensor::greater_equal_(const at::Scalar & other) const {
4791 return at::_ops::greater_equal__Scalar::call(const_cast<Tensor&>(*this), other);
4792}
4793
4794// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4795inline at::Tensor & Tensor::greater_equal_(const at::Tensor & other) const {
4796 return at::_ops::greater_equal__Tensor::call(const_cast<Tensor&>(*this), other);
4797}
4798
4799// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
4800inline at::Tensor Tensor::le(const at::Scalar & other) const {
4801 return at::_ops::le_Scalar::call(const_cast<Tensor&>(*this), other);
4802}
4803
4804// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
4805inline at::Tensor Tensor::le(const at::Tensor & other) const {
4806 return at::_ops::le_Tensor::call(const_cast<Tensor&>(*this), other);
4807}
4808
4809// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4810inline at::Tensor & Tensor::le_(const at::Scalar & other) const {
4811 return at::_ops::le__Scalar::call(const_cast<Tensor&>(*this), other);
4812}
4813
4814// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4815inline at::Tensor & Tensor::le_(const at::Tensor & other) const {
4816 return at::_ops::le__Tensor::call(const_cast<Tensor&>(*this), other);
4817}
4818
4819// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
4820inline at::Tensor Tensor::less_equal(const at::Scalar & other) const {
4821 return at::_ops::less_equal_Scalar::call(const_cast<Tensor&>(*this), other);
4822}
4823
4824// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
4825inline at::Tensor Tensor::less_equal(const at::Tensor & other) const {
4826 return at::_ops::less_equal_Tensor::call(const_cast<Tensor&>(*this), other);
4827}
4828
4829// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4830inline at::Tensor & Tensor::less_equal_(const at::Scalar & other) const {
4831 return at::_ops::less_equal__Scalar::call(const_cast<Tensor&>(*this), other);
4832}
4833
4834// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4835inline at::Tensor & Tensor::less_equal_(const at::Tensor & other) const {
4836 return at::_ops::less_equal__Tensor::call(const_cast<Tensor&>(*this), other);
4837}
4838
4839// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
4840inline at::Tensor Tensor::gt(const at::Scalar & other) const {
4841 return at::_ops::gt_Scalar::call(const_cast<Tensor&>(*this), other);
4842}
4843
4844// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
4845inline at::Tensor Tensor::gt(const at::Tensor & other) const {
4846 return at::_ops::gt_Tensor::call(const_cast<Tensor&>(*this), other);
4847}
4848
4849// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4850inline at::Tensor & Tensor::gt_(const at::Scalar & other) const {
4851 return at::_ops::gt__Scalar::call(const_cast<Tensor&>(*this), other);
4852}
4853
4854// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4855inline at::Tensor & Tensor::gt_(const at::Tensor & other) const {
4856 return at::_ops::gt__Tensor::call(const_cast<Tensor&>(*this), other);
4857}
4858
4859// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
4860inline at::Tensor Tensor::greater(const at::Scalar & other) const {
4861 return at::_ops::greater_Scalar::call(const_cast<Tensor&>(*this), other);
4862}
4863
4864// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
4865inline at::Tensor Tensor::greater(const at::Tensor & other) const {
4866 return at::_ops::greater_Tensor::call(const_cast<Tensor&>(*this), other);
4867}
4868
4869// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4870inline at::Tensor & Tensor::greater_(const at::Scalar & other) const {
4871 return at::_ops::greater__Scalar::call(const_cast<Tensor&>(*this), other);
4872}
4873
4874// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4875inline at::Tensor & Tensor::greater_(const at::Tensor & other) const {
4876 return at::_ops::greater__Tensor::call(const_cast<Tensor&>(*this), other);
4877}
4878
4879// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
4880inline at::Tensor Tensor::lt(const at::Scalar & other) const {
4881 return at::_ops::lt_Scalar::call(const_cast<Tensor&>(*this), other);
4882}
4883
4884// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
4885inline at::Tensor Tensor::lt(const at::Tensor & other) const {
4886 return at::_ops::lt_Tensor::call(const_cast<Tensor&>(*this), other);
4887}
4888
4889// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4890inline at::Tensor & Tensor::lt_(const at::Scalar & other) const {
4891 return at::_ops::lt__Scalar::call(const_cast<Tensor&>(*this), other);
4892}
4893
4894// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4895inline at::Tensor & Tensor::lt_(const at::Tensor & other) const {
4896 return at::_ops::lt__Tensor::call(const_cast<Tensor&>(*this), other);
4897}
4898
4899// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
4900inline at::Tensor Tensor::less(const at::Scalar & other) const {
4901 return at::_ops::less_Scalar::call(const_cast<Tensor&>(*this), other);
4902}
4903
4904// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
4905inline at::Tensor Tensor::less(const at::Tensor & other) const {
4906 return at::_ops::less_Tensor::call(const_cast<Tensor&>(*this), other);
4907}
4908
4909// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
4910inline at::Tensor & Tensor::less_(const at::Scalar & other) const {
4911 return at::_ops::less__Scalar::call(const_cast<Tensor&>(*this), other);
4912}
4913
4914// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4915inline at::Tensor & Tensor::less_(const at::Tensor & other) const {
4916 return at::_ops::less__Tensor::call(const_cast<Tensor&>(*this), other);
4917}
4918
4919// aten::take(Tensor self, Tensor index) -> Tensor
4920inline at::Tensor Tensor::take(const at::Tensor & index) const {
4921 return at::_ops::take::call(const_cast<Tensor&>(*this), index);
4922}
4923
4924// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
4925inline at::Tensor Tensor::take_along_dim(const at::Tensor & indices, c10::optional<int64_t> dim) const {
4926 return at::_ops::take_along_dim::call(const_cast<Tensor&>(*this), indices, dim);
4927}
4928
4929// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
4930inline at::Tensor Tensor::index_select(int64_t dim, const at::Tensor & index) const {
4931 return at::_ops::index_select::call(const_cast<Tensor&>(*this), dim, index);
4932}
4933
4934// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
4935inline at::Tensor Tensor::index_select(at::Dimname dim, const at::Tensor & index) const {
4936 return at::_ops::index_select_dimname::call(const_cast<Tensor&>(*this), dim, index);
4937}
4938
4939// aten::masked_select(Tensor self, Tensor mask) -> Tensor
4940inline at::Tensor Tensor::masked_select(const at::Tensor & mask) const {
4941 return at::_ops::masked_select::call(const_cast<Tensor&>(*this), mask);
4942}
4943
4944// aten::nonzero(Tensor self) -> Tensor
4945inline at::Tensor Tensor::nonzero() const {
4946 return at::_ops::nonzero::call(const_cast<Tensor&>(*this));
4947}
4948
4949// aten::nonzero_numpy(Tensor self) -> Tensor[]
4950inline ::std::vector<at::Tensor> Tensor::nonzero_numpy() const {
4951 return at::_ops::nonzero_numpy::call(const_cast<Tensor&>(*this));
4952}
4953
4954// aten::argwhere(Tensor self) -> Tensor
4955inline at::Tensor Tensor::argwhere() const {
4956 return at::_ops::argwhere::call(const_cast<Tensor&>(*this));
4957}
4958
4959// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
4960inline at::Tensor Tensor::gather(int64_t dim, const at::Tensor & index, bool sparse_grad) const {
4961 return at::_ops::gather::call(const_cast<Tensor&>(*this), dim, index, sparse_grad);
4962}
4963
4964// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
4965inline at::Tensor Tensor::gather(at::Dimname dim, const at::Tensor & index, bool sparse_grad) const {
4966 return at::_ops::gather_dimname::call(const_cast<Tensor&>(*this), dim, index, sparse_grad);
4967}
4968
4969// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
4970inline at::Tensor Tensor::addcmul(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const {
4971 return at::_ops::addcmul::call(const_cast<Tensor&>(*this), tensor1, tensor2, value);
4972}
4973
4974// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
4975inline at::Tensor & Tensor::addcmul_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const {
4976 return at::_ops::addcmul_::call(const_cast<Tensor&>(*this), tensor1, tensor2, value);
4977}
4978
4979// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
4980inline at::Tensor Tensor::addcdiv(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const {
4981 return at::_ops::addcdiv::call(const_cast<Tensor&>(*this), tensor1, tensor2, value);
4982}
4983
4984// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
4985inline at::Tensor & Tensor::addcdiv_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const {
4986 return at::_ops::addcdiv_::call(const_cast<Tensor&>(*this), tensor1, tensor2, value);
4987}
4988
4989// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
4990inline ::std::tuple<at::Tensor,at::Tensor> Tensor::triangular_solve(const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const {
4991 return at::_ops::triangular_solve::call(const_cast<Tensor&>(*this), A, upper, transpose, unitriangular);
4992}
4993
4994// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
4995inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> Tensor::svd(bool some, bool compute_uv) const {
4996 return at::_ops::svd::call(const_cast<Tensor&>(*this), some, compute_uv);
4997}
4998
4999// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
5000inline at::Tensor Tensor::swapaxes(int64_t axis0, int64_t axis1) const {
5001 return at::_ops::swapaxes::call(const_cast<Tensor&>(*this), axis0, axis1);
5002}
5003
5004// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
5005inline at::Tensor & Tensor::swapaxes_(int64_t axis0, int64_t axis1) const {
5006 return at::_ops::swapaxes_::call(const_cast<Tensor&>(*this), axis0, axis1);
5007}
5008
5009// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
5010inline at::Tensor Tensor::swapdims(int64_t dim0, int64_t dim1) const {
5011 return at::_ops::swapdims::call(const_cast<Tensor&>(*this), dim0, dim1);
5012}
5013
5014// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
5015inline at::Tensor & Tensor::swapdims_(int64_t dim0, int64_t dim1) const {
5016 return at::_ops::swapdims_::call(const_cast<Tensor&>(*this), dim0, dim1);
5017}
5018
5019// aten::cholesky(Tensor self, bool upper=False) -> Tensor
5020inline at::Tensor Tensor::cholesky(bool upper) const {
5021 return at::_ops::cholesky::call(const_cast<Tensor&>(*this), upper);
5022}
5023
5024// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
5025inline at::Tensor Tensor::cholesky_solve(const at::Tensor & input2, bool upper) const {
5026 return at::_ops::cholesky_solve::call(const_cast<Tensor&>(*this), input2, upper);
5027}
5028
5029// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
5030inline at::Tensor Tensor::cholesky_inverse(bool upper) const {
5031 return at::_ops::cholesky_inverse::call(const_cast<Tensor&>(*this), upper);
5032}
5033
5034// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
5035inline ::std::tuple<at::Tensor,at::Tensor> Tensor::qr(bool some) const {
5036 return at::_ops::qr::call(const_cast<Tensor&>(*this), some);
5037}
5038
5039// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
5040inline ::std::tuple<at::Tensor,at::Tensor> Tensor::geqrf() const {
5041 return at::_ops::geqrf::call(const_cast<Tensor&>(*this));
5042}
5043
5044// aten::orgqr(Tensor self, Tensor input2) -> Tensor
5045inline at::Tensor Tensor::orgqr(const at::Tensor & input2) const {
5046 return at::_ops::orgqr::call(const_cast<Tensor&>(*this), input2);
5047}
5048
5049// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
5050inline at::Tensor Tensor::ormqr(const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const {
5051 return at::_ops::ormqr::call(const_cast<Tensor&>(*this), input2, input3, left, transpose);
5052}
5053
5054// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
5055inline at::Tensor Tensor::lu_solve(const at::Tensor & LU_data, const at::Tensor & LU_pivots) const {
5056 return at::_ops::lu_solve::call(const_cast<Tensor&>(*this), LU_data, LU_pivots);
5057}
5058
5059// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
5060inline at::Tensor Tensor::multinomial(int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) const {
5061 return at::_ops::multinomial::call(const_cast<Tensor&>(*this), num_samples, replacement, generator);
5062}
5063
5064// aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
5065inline at::Tensor & Tensor::lgamma_() const {
5066 return at::_ops::lgamma_::call(const_cast<Tensor&>(*this));
5067}
5068
5069// aten::lgamma(Tensor self) -> Tensor
5070inline at::Tensor Tensor::lgamma() const {
5071 return at::_ops::lgamma::call(const_cast<Tensor&>(*this));
5072}
5073
5074// aten::digamma(Tensor self) -> Tensor
5075inline at::Tensor Tensor::digamma() const {
5076 return at::_ops::digamma::call(const_cast<Tensor&>(*this));
5077}
5078
5079// aten::polygamma(int n, Tensor self) -> Tensor
5080inline at::Tensor Tensor::polygamma(int64_t n) const {
5081 return at::_ops::polygamma::call(n, const_cast<Tensor&>(*this));
5082}
5083
5084// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
5085inline at::Tensor & Tensor::polygamma_(int64_t n) const {
5086 return at::_ops::polygamma_::call(const_cast<Tensor&>(*this), n);
5087}
5088
5089// aten::erfinv(Tensor self) -> Tensor
5090inline at::Tensor Tensor::erfinv() const {
5091 return at::_ops::erfinv::call(const_cast<Tensor&>(*this));
5092}
5093
5094// aten::erfinv_(Tensor(a!) self) -> Tensor(a!)
5095inline at::Tensor & Tensor::erfinv_() const {
5096 return at::_ops::erfinv_::call(const_cast<Tensor&>(*this));
5097}
5098
5099// aten::i0(Tensor self) -> Tensor
5100inline at::Tensor Tensor::i0() const {
5101 return at::_ops::i0::call(const_cast<Tensor&>(*this));
5102}
5103
5104// aten::i0_(Tensor(a!) self) -> Tensor(a!)
5105inline at::Tensor & Tensor::i0_() const {
5106 return at::_ops::i0_::call(const_cast<Tensor&>(*this));
5107}
5108
5109// aten::sign(Tensor self) -> Tensor
5110inline at::Tensor Tensor::sign() const {
5111 return at::_ops::sign::call(const_cast<Tensor&>(*this));
5112}
5113
5114// aten::sign_(Tensor(a!) self) -> Tensor(a!)
5115inline at::Tensor & Tensor::sign_() const {
5116 return at::_ops::sign_::call(const_cast<Tensor&>(*this));
5117}
5118
5119// aten::signbit(Tensor self) -> Tensor
5120inline at::Tensor Tensor::signbit() const {
5121 return at::_ops::signbit::call(const_cast<Tensor&>(*this));
5122}
5123
5124// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
5125inline at::Tensor Tensor::dist(const at::Tensor & other, const at::Scalar & p) const {
5126 return at::_ops::dist::call(const_cast<Tensor&>(*this), other, p);
5127}
5128
5129// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
5130inline at::Tensor & Tensor::atan2_(const at::Tensor & other) const {
5131 return at::_ops::atan2_::call(const_cast<Tensor&>(*this), other);
5132}
5133
5134// aten::atan2(Tensor self, Tensor other) -> Tensor
5135inline at::Tensor Tensor::atan2(const at::Tensor & other) const {
5136 return at::_ops::atan2::call(const_cast<Tensor&>(*this), other);
5137}
5138
5139// aten::arctan2(Tensor self, Tensor other) -> Tensor
5140inline at::Tensor Tensor::arctan2(const at::Tensor & other) const {
5141 return at::_ops::arctan2::call(const_cast<Tensor&>(*this), other);
5142}
5143
5144// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
5145inline at::Tensor & Tensor::arctan2_(const at::Tensor & other) const {
5146 return at::_ops::arctan2_::call(const_cast<Tensor&>(*this), other);
5147}
5148
5149// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
5150inline at::Tensor Tensor::lerp(const at::Tensor & end, const at::Scalar & weight) const {
5151 return at::_ops::lerp_Scalar::call(const_cast<Tensor&>(*this), end, weight);
5152}
5153
5154// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
5155inline at::Tensor Tensor::lerp(const at::Tensor & end, const at::Tensor & weight) const {
5156 return at::_ops::lerp_Tensor::call(const_cast<Tensor&>(*this), end, weight);
5157}
5158
5159// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
5160inline at::Tensor Tensor::histc(int64_t bins, const at::Scalar & min, const at::Scalar & max) const {
5161 return at::_ops::histc::call(const_cast<Tensor&>(*this), bins, min, max);
5162}
5163
5164// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
5165inline ::std::tuple<at::Tensor,at::Tensor> Tensor::histogram(const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) const {
5166 return at::_ops::histogram_bins_tensor::call(const_cast<Tensor&>(*this), bins, weight, density);
5167}
5168
5169// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
5170inline ::std::tuple<at::Tensor,at::Tensor> Tensor::histogram(int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) const {
5171 return at::_ops::histogram_bin_ct::call(const_cast<Tensor&>(*this), bins, range, weight, density);
5172}
5173
5174// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
5175inline at::Tensor Tensor::fmod(const at::Scalar & other) const {
5176 return at::_ops::fmod_Scalar::call(const_cast<Tensor&>(*this), other);
5177}
5178
5179// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
5180inline at::Tensor & Tensor::fmod_(const at::Scalar & other) const {
5181 return at::_ops::fmod__Scalar::call(const_cast<Tensor&>(*this), other);
5182}
5183
5184// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
5185inline at::Tensor Tensor::fmod(const at::Tensor & other) const {
5186 return at::_ops::fmod_Tensor::call(const_cast<Tensor&>(*this), other);
5187}
5188
5189// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
5190inline at::Tensor & Tensor::fmod_(const at::Tensor & other) const {
5191 return at::_ops::fmod__Tensor::call(const_cast<Tensor&>(*this), other);
5192}
5193
5194// aten::hypot(Tensor self, Tensor other) -> Tensor
5195inline at::Tensor Tensor::hypot(const at::Tensor & other) const {
5196 return at::_ops::hypot::call(const_cast<Tensor&>(*this), other);
5197}
5198
5199// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
5200inline at::Tensor & Tensor::hypot_(const at::Tensor & other) const {
5201 return at::_ops::hypot_::call(const_cast<Tensor&>(*this), other);
5202}
5203
5204// aten::igamma(Tensor self, Tensor other) -> Tensor
5205inline at::Tensor Tensor::igamma(const at::Tensor & other) const {
5206 return at::_ops::igamma::call(const_cast<Tensor&>(*this), other);
5207}
5208
5209// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
5210inline at::Tensor & Tensor::igamma_(const at::Tensor & other) const {
5211 return at::_ops::igamma_::call(const_cast<Tensor&>(*this), other);
5212}
5213
5214// aten::igammac(Tensor self, Tensor other) -> Tensor
5215inline at::Tensor Tensor::igammac(const at::Tensor & other) const {
5216 return at::_ops::igammac::call(const_cast<Tensor&>(*this), other);
5217}
5218
5219// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
5220inline at::Tensor & Tensor::igammac_(const at::Tensor & other) const {
5221 return at::_ops::igammac_::call(const_cast<Tensor&>(*this), other);
5222}
5223
5224// aten::nextafter(Tensor self, Tensor other) -> Tensor
5225inline at::Tensor Tensor::nextafter(const at::Tensor & other) const {
5226 return at::_ops::nextafter::call(const_cast<Tensor&>(*this), other);
5227}
5228
5229// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
5230inline at::Tensor & Tensor::nextafter_(const at::Tensor & other) const {
5231 return at::_ops::nextafter_::call(const_cast<Tensor&>(*this), other);
5232}
5233
5234// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor
5235inline at::Tensor Tensor::remainder(const at::Scalar & other) const {
5236 return at::_ops::remainder_Scalar::call(const_cast<Tensor&>(*this), other);
5237}
5238
5239// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
5240inline at::Tensor & Tensor::remainder_(const at::Scalar & other) const {
5241 return at::_ops::remainder__Scalar::call(const_cast<Tensor&>(*this), other);
5242}
5243
5244// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor
5245inline at::Tensor Tensor::remainder(const at::Tensor & other) const {
5246 return at::_ops::remainder_Tensor::call(const_cast<Tensor&>(*this), other);
5247}
5248
5249// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
5250inline at::Tensor & Tensor::remainder_(const at::Tensor & other) const {
5251 return at::_ops::remainder__Tensor::call(const_cast<Tensor&>(*this), other);
5252}
5253
5254// aten::min(Tensor self) -> Tensor
5255inline at::Tensor Tensor::min() const {
5256 return at::_ops::min::call(const_cast<Tensor&>(*this));
5257}
5258
5259// aten::fmin(Tensor self, Tensor other) -> Tensor
5260inline at::Tensor Tensor::fmin(const at::Tensor & other) const {
5261 return at::_ops::fmin::call(const_cast<Tensor&>(*this), other);
5262}
5263
5264// aten::max(Tensor self) -> Tensor
5265inline at::Tensor Tensor::max() const {
5266 return at::_ops::max::call(const_cast<Tensor&>(*this));
5267}
5268
5269// aten::fmax(Tensor self, Tensor other) -> Tensor
5270inline at::Tensor Tensor::fmax(const at::Tensor & other) const {
5271 return at::_ops::fmax::call(const_cast<Tensor&>(*this), other);
5272}
5273
5274// aten::maximum(Tensor self, Tensor other) -> Tensor
5275inline at::Tensor Tensor::maximum(const at::Tensor & other) const {
5276 return at::_ops::maximum::call(const_cast<Tensor&>(*this), other);
5277}
5278
5279// aten::max.other(Tensor self, Tensor other) -> Tensor
5280inline at::Tensor Tensor::max(const at::Tensor & other) const {
5281 return at::_ops::max_other::call(const_cast<Tensor&>(*this), other);
5282}
5283
5284// aten::minimum(Tensor self, Tensor other) -> Tensor
5285inline at::Tensor Tensor::minimum(const at::Tensor & other) const {
5286 return at::_ops::minimum::call(const_cast<Tensor&>(*this), other);
5287}
5288
5289// aten::min.other(Tensor self, Tensor other) -> Tensor
5290inline at::Tensor Tensor::min(const at::Tensor & other) const {
5291 return at::_ops::min_other::call(const_cast<Tensor&>(*this), other);
5292}
5293
5294// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
5295inline at::Tensor Tensor::quantile(const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) const {
5296 return at::_ops::quantile::call(const_cast<Tensor&>(*this), q, dim, keepdim, interpolation);
5297}
5298
5299// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
5300inline at::Tensor Tensor::quantile(double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) const {
5301 return at::_ops::quantile_scalar::call(const_cast<Tensor&>(*this), q, dim, keepdim, interpolation);
5302}
5303
5304// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
5305inline at::Tensor Tensor::nanquantile(const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) const {
5306 return at::_ops::nanquantile::call(const_cast<Tensor&>(*this), q, dim, keepdim, interpolation);
5307}
5308
5309// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
5310inline at::Tensor Tensor::nanquantile(double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) const {
5311 return at::_ops::nanquantile_scalar::call(const_cast<Tensor&>(*this), q, dim, keepdim, interpolation);
5312}
5313
5314// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
5315inline ::std::tuple<at::Tensor,at::Tensor> Tensor::sort(int64_t dim, bool descending) const {
5316 return at::_ops::sort::call(const_cast<Tensor&>(*this), dim, descending);
5317}
5318
5319// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
5320inline ::std::tuple<at::Tensor,at::Tensor> Tensor::sort(c10::optional<bool> stable, int64_t dim, bool descending) const {
5321 return at::_ops::sort_stable::call(const_cast<Tensor&>(*this), stable, dim, descending);
5322}
5323
5324// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
5325inline ::std::tuple<at::Tensor,at::Tensor> Tensor::sort(at::Dimname dim, bool descending) const {
5326 return at::_ops::sort_dimname::call(const_cast<Tensor&>(*this), dim, descending);
5327}
5328
5329// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
5330inline ::std::tuple<at::Tensor,at::Tensor> Tensor::sort(c10::optional<bool> stable, at::Dimname dim, bool descending) const {
5331 return at::_ops::sort_dimname_stable::call(const_cast<Tensor&>(*this), stable, dim, descending);
5332}
5333
5334// aten::msort(Tensor self) -> Tensor
5335inline at::Tensor Tensor::msort() const {
5336 return at::_ops::msort::call(const_cast<Tensor&>(*this));
5337}
5338
5339// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
5340inline at::Tensor Tensor::argsort(int64_t dim, bool descending) const {
5341 return at::_ops::argsort::call(const_cast<Tensor&>(*this), dim, descending);
5342}
5343
5344// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
5345inline at::Tensor Tensor::argsort(bool stable, int64_t dim, bool descending) const {
5346 return at::_ops::argsort_stable::call(const_cast<Tensor&>(*this), stable, dim, descending);
5347}
5348
5349// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
5350inline at::Tensor Tensor::argsort(at::Dimname dim, bool descending) const {
5351 return at::_ops::argsort_dimname::call(const_cast<Tensor&>(*this), dim, descending);
5352}
5353
5354// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
5355inline ::std::tuple<at::Tensor,at::Tensor> Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const {
5356 return at::_ops::topk::call(const_cast<Tensor&>(*this), k, dim, largest, sorted);
5357}
5358
5359// aten::all(Tensor self) -> Tensor
5360inline at::Tensor Tensor::all() const {
5361 return at::_ops::all::call(const_cast<Tensor&>(*this));
5362}
5363
5364// aten::any(Tensor self) -> Tensor
5365inline at::Tensor Tensor::any() const {
5366 return at::_ops::any::call(const_cast<Tensor&>(*this));
5367}
5368
5369// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
5370inline at::Tensor Tensor::renorm(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const {
5371 return at::_ops::renorm::call(const_cast<Tensor&>(*this), p, dim, maxnorm);
5372}
5373
5374// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
5375inline at::Tensor & Tensor::renorm_(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const {
5376 return at::_ops::renorm_::call(const_cast<Tensor&>(*this), p, dim, maxnorm);
5377}
5378
5379// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
5380inline at::Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) const {
5381 return at::_ops::unfold::call(const_cast<Tensor&>(*this), dimension, size, step);
5382}
5383
5384// aten::equal(Tensor self, Tensor other) -> bool
5385inline bool Tensor::equal(const at::Tensor & other) const {
5386 return at::_ops::equal::call(const_cast<Tensor&>(*this), other);
5387}
5388
5389// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
5390inline at::Tensor Tensor::pow(const at::Tensor & exponent) const {
5391 return at::_ops::pow_Tensor_Tensor::call(const_cast<Tensor&>(*this), exponent);
5392}
5393
5394// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
5395inline at::Tensor Tensor::pow(const at::Scalar & exponent) const {
5396 return at::_ops::pow_Tensor_Scalar::call(const_cast<Tensor&>(*this), exponent);
5397}
5398
5399// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
5400inline at::Tensor & Tensor::pow_(const at::Scalar & exponent) const {
5401 return at::_ops::pow__Scalar::call(const_cast<Tensor&>(*this), exponent);
5402}
5403
5404// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
5405inline at::Tensor & Tensor::pow_(const at::Tensor & exponent) const {
5406 return at::_ops::pow__Tensor::call(const_cast<Tensor&>(*this), exponent);
5407}
5408
5409// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
5410inline at::Tensor Tensor::float_power(const at::Tensor & exponent) const {
5411 return at::_ops::float_power_Tensor_Tensor::call(const_cast<Tensor&>(*this), exponent);
5412}
5413
5414// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
5415inline at::Tensor Tensor::float_power(const at::Scalar & exponent) const {
5416 return at::_ops::float_power_Tensor_Scalar::call(const_cast<Tensor&>(*this), exponent);
5417}
5418
5419// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
5420inline at::Tensor & Tensor::float_power_(const at::Scalar & exponent) const {
5421 return at::_ops::float_power__Scalar::call(const_cast<Tensor&>(*this), exponent);
5422}
5423
5424// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
5425inline at::Tensor & Tensor::float_power_(const at::Tensor & exponent) const {
5426 return at::_ops::float_power__Tensor::call(const_cast<Tensor&>(*this), exponent);
5427}
5428
5429// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
5430inline at::Tensor & Tensor::normal_(double mean, double std, c10::optional<at::Generator> generator) const {
5431 return at::_ops::normal_::call(const_cast<Tensor&>(*this), mean, std, generator);
5432}
5433
5434// aten::alias(Tensor(a) self) -> Tensor(a)
5435inline at::Tensor Tensor::alias() const {
5436 return at::_ops::alias::call(const_cast<Tensor&>(*this));
5437}
5438
5439// aten::isfinite(Tensor self) -> Tensor
5440inline at::Tensor Tensor::isfinite() const {
5441 return at::_ops::isfinite::call(const_cast<Tensor&>(*this));
5442}
5443
5444// aten::isinf(Tensor self) -> Tensor
5445inline at::Tensor Tensor::isinf() const {
5446 return at::_ops::isinf::call(const_cast<Tensor&>(*this));
5447}
5448
5449// aten::record_stream(Tensor(a!) self, Stream s) -> ()
5450inline void Tensor::record_stream(at::Stream s) const {
5451 return at::_ops::record_stream::call(const_cast<Tensor&>(*this), s);
5452}
5453
5454// aten::isposinf(Tensor self) -> Tensor
5455inline at::Tensor Tensor::isposinf() const {
5456 return at::_ops::isposinf::call(const_cast<Tensor&>(*this));
5457}
5458
5459// aten::isneginf(Tensor self) -> Tensor
5460inline at::Tensor Tensor::isneginf() const {
5461 return at::_ops::isneginf::call(const_cast<Tensor&>(*this));
5462}
5463
5464// aten::det(Tensor self) -> Tensor
5465inline at::Tensor Tensor::det() const {
5466 return at::_ops::det::call(const_cast<Tensor&>(*this));
5467}
5468
5469// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
5470inline ::std::tuple<at::Tensor,at::Tensor> Tensor::slogdet() const {
5471 return at::_ops::slogdet::call(const_cast<Tensor&>(*this));
5472}
5473
5474// aten::logdet(Tensor self) -> Tensor
5475inline at::Tensor Tensor::logdet() const {
5476 return at::_ops::logdet::call(const_cast<Tensor&>(*this));
5477}
5478
5479// aten::inverse(Tensor self) -> Tensor
5480inline at::Tensor Tensor::inverse() const {
5481 return at::_ops::inverse::call(const_cast<Tensor&>(*this));
5482}
5483
5484// aten::inner(Tensor self, Tensor other) -> Tensor
5485inline at::Tensor Tensor::inner(const at::Tensor & other) const {
5486 return at::_ops::inner::call(const_cast<Tensor&>(*this), other);
5487}
5488
5489// aten::outer(Tensor self, Tensor vec2) -> Tensor
5490inline at::Tensor Tensor::outer(const at::Tensor & vec2) const {
5491 return at::_ops::outer::call(const_cast<Tensor&>(*this), vec2);
5492}
5493
5494// aten::ger(Tensor self, Tensor vec2) -> Tensor
5495inline at::Tensor Tensor::ger(const at::Tensor & vec2) const {
5496 return at::_ops::ger::call(const_cast<Tensor&>(*this), vec2);
5497}
5498
5499// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
5500inline at::Tensor Tensor::to_padded_tensor(double padding, at::OptionalIntArrayRef output_size) const {
5501 return at::_ops::to_padded_tensor::call(const_cast<Tensor&>(*this), padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt);
5502}
5503
5504// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
5505inline at::Tensor Tensor::to_padded_tensor_symint(double padding, at::OptionalSymIntArrayRef output_size) const {
5506 return at::_ops::to_padded_tensor::call(const_cast<Tensor&>(*this), padding, output_size);
5507}
5508} // namespace at
5509
5510
5511namespace c10 {
5512template <>
5513struct MaybeOwnedTraits<at::Tensor> {
5514 using owned_type = at::Tensor;
5515 using borrow_type = at::Tensor;
5516
5517 static borrow_type createBorrow(const owned_type& from) {
5518 // NOTE: this can be implemented without the special
5519 // unsafe_borrow_t Tensor constructor as
5520 //
5521 // return borrow_type(c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(from.unsafeGetTensorImpl()));
5522 //
5523 // but that hurts inlining due to the nullptr check in the
5524 // Tensor(c10::intrusive_ptr<...>) constructor. We already know
5525 // that from.impl_ isn't null because from is a valid Tensor, so
5526 // we needn't do the check again. (using __builtin_assume can
5527 // avoid this, but wouldn't be portable to MSVC.)
5528 return borrow_type(borrow_type::unsafe_borrow_t{}, from);
5529 }
5530
5531 static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
5532 lhs.unsafeReleaseTensorImpl();
5533 // See above note: this can be implemented with public API
5534 // similarly to createBorrow(), but that would hurt inlining.
5535 lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
5536 }
5537
5538 static void destroyBorrow(borrow_type& toDestroy) {
5539 toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0.
5540 }
5541
5542 static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
5543 return borrow;
5544 }
5545
5546 static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
5547 return &borrow;
5548 }
5549
5550 static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
5551 return true;
5552 }
5553};
5554
5555template <>
5556struct ExclusivelyOwnedTraits<at::Tensor> {
5557 using repr_type = at::Tensor;
5558 using pointer_type = at::Tensor*;
5559 using const_pointer_type = const at::Tensor*;
5560
5561 static repr_type nullRepr() {
5562 return at::Tensor();
5563 }
5564
5565 template <class... Args>
5566 static repr_type createInPlace(Args&&... args) {
5567 return at::Tensor(std::forward<Args>(args)...);
5568 }
5569
5570 static repr_type moveToRepr(at::Tensor&& x) {
5571 return std::move(x);
5572 }
5573
5574 static void destroyOwned(at::Tensor& x) {
5575 return ExclusivelyOwnedTraits<at::TensorBase>::destroyOwned(x);
5576 }
5577
5578 static at::Tensor take(at::Tensor& x) {
5579 return std::move(x);
5580 }
5581
5582 static pointer_type getImpl(repr_type& x) {
5583 return &x;
5584 }
5585
5586 static const_pointer_type getImpl(const repr_type& x) {
5587 return &x;
5588 }
5589};
5590} // namespace c10
5591
5592namespace at {
5593
5594inline c10::MaybeOwned<Tensor> borrow_from_optional_tensor(
5595 const c10::optional<Tensor>& opt) {
5596 return opt.has_value()
5597 ? c10::MaybeOwned<Tensor>::borrowed(*opt)
5598 : c10::MaybeOwned<Tensor>::owned(c10::in_place);
5599}
5600
5601inline c10::MaybeOwned<Tensor> Tensor::expect_contiguous(MemoryFormat memory_format) const & {
5602 if (is_contiguous(memory_format)) {
5603 return c10::MaybeOwned<Tensor>::borrowed(*this);
5604 } else {
5605 return c10::MaybeOwned<Tensor>::owned(__dispatch_contiguous(memory_format));
5606 }
5607}
5608} // namespace at
5609