1 | #define TORCH_ASSERT_NO_OPERATORS |
2 | |
3 | #include <ATen/native/DispatchStub.h> |
4 | #include <ATen/TensorIterator.h> |
5 | #include <ATen/TensorMeta.h> |
6 | |
7 | namespace at { |
8 | |
9 | // NB: this is explicitly copied here (via codegen) rather than |
10 | // included via NativeFunctions.h to avoid recompiling this file when |
11 | // NativeFunctions.h changes |
namespace meta {
// Meta declaration for the structured kernel `add.Tensor`.
// Inherits TensorIteratorBase so that meta() can configure the iterator
// (operands, output shape/dtype) before the device-specific impl() runs —
// standard structured-kernel meta/impl split; the matching definition is
// codegen'd elsewhere (TODO confirm against the generated NativeFunctions).
struct TORCH_API structured_add_Tensor : public TensorIteratorBase {
  // Metadata step for `self + alpha * other` (per the add.Tensor schema).
  // NOTE(review): presumably sets up a broadcasting binary-op iterator over
  // self/other — the body is not visible here; verify in the codegen output.
  void meta(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha);
}; // struct structured_add_Tensor
} // namespace meta
19 | |
namespace native {
// CPU instantiation of the structured `add` kernel: impl() is the
// device-specific step that runs after meta() above has configured the
// TensorIterator. Its definition is not in this file.
struct TORCH_API structured_ufunc_add_CPU : public at::meta::structured_add_Tensor {
  void impl(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, const at::Tensor & out);
};

// Dispatch-stub signature for the element-wise kernel. Only the configured
// iterator and `alpha` are passed: `self`/`other`/`out` are not forwarded
// separately because they are already registered as iterator operands
// (NOTE(review): standard structured-kernel flow — confirm against the
// kernel-registration TU).
using add_fn = void(*)(TensorIteratorBase&, const at::Scalar &);
// Declare and define the stub in this TU; the actual CPU kernels are
// compiled separately and register themselves into `add_stub` via the
// DispatchStub mechanism (see ATen/native/DispatchStub.h).
DECLARE_DISPATCH(add_fn, add_stub);
DEFINE_DISPATCH(add_stub);

// Glue from the structured-kernel entry point to the dispatch stub:
// `*this` is the TensorIteratorBase configured by meta(); device_type()
// (inherited from the iterator) selects which registered kernel to call.
// The tensor arguments are intentionally unused here — they already live
// inside the iterator (see the add_fn comment above).
TORCH_IMPL_FUNC(ufunc_add_CPU)(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, const at::Tensor & out) {
  add_stub(device_type(), *this, alpha);
}
}} // namespace at::native
33 | |