#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>
#include <ATen/ops/add_meta.h>

namespace at {
namespace native {
struct TORCH_API structured_ufunc_add_CPU : public at::meta::structured_add_Tensor {
void impl(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, const at::Tensor & out);
};
struct TORCH_API structured_ufunc_add_CUDA : public at::meta::structured_add_Tensor {
void impl(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, const at::Tensor & out);
};
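// Note (editorial comment, not emitted by torchgen): the structured_ufunc_add_*
// structs above follow ATen's structured-kernel pattern. The inherited meta
// class (at::meta::structured_add_Tensor) validates inputs and computes the
// output's shape/dtype, and each backend's impl() writes the elementwise
// result into the preallocated `out` tensor.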
TORCH_API at::Tensor NestedTensor_add_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor & NestedTensor_add__Tensor(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor add_sparse(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor & add_out_sparse_cpu(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
TORCH_API at::Tensor & add_sparse_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor & add_out_sparse_cuda(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
TORCH_API at::Tensor add_sparse_csr(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor & add_out_sparse_csr_cpu(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
TORCH_API at::Tensor & add_sparse_csr_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor & add_out_sparse_csr_cuda(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
TORCH_API at::Tensor mkldnn_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor & mkldnn_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
TORCH_API at::Tensor & mkldnn_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor add_zerotensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
TORCH_API at::Tensor & add_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out);
TORCH_API at::Tensor & add_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
} // namespace native
} // namespace at
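// Usage sketch (illustrative comment, not part of the generated interface):
// callers normally reach these backend kernels through the dispatcher on the
// public at::add entry point rather than invoking them directly, e.g.
//
//   #include <ATen/ATen.h>
//
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({2, 3});
//   at::Tensor c = at::add(a, b, /*alpha=*/2);  // dispatches by backend, e.g. to
//                                               // the CPU structured kernel above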