#pragma once

// @generated by torchgen/gen.py from NativeFunctions.h

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
  meaning the file will need to be re-compiled every time an operator \
  is changed or added. Consider if your change would be better placed in \
  another file, or if a more specific header might achieve the same goal. \
  See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
  file will need to be re-compiled every time an operator is changed or added. \
  Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
  and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

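// Illustration only (not emitted by torchgen): the error above recommends depending on a
// single per-operator header rather than on this aggregate one. Assuming the operator of
// interest were `add`, a translation unit built with TORCH_ASSERT_ONLY_METHOD_OPERATORS
// could instead write:
//
//   #include <ATen/ops/add_native.h>
//
// which pulls in only that operator's native declarations.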
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>

namespace at {
namespace native {
} // namespace native
} // namespace at

namespace custom {
namespace native {
TORCH_API at::Tensor & add_3_out(const at::Tensor & a, const at::Tensor & b, const at::Tensor & c, at::Tensor & out);
} // namespace native
} // namespace custom
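// Usage sketch (an assumption, not part of the generated declarations): add_3_out follows
// the ATen out-variant convention, where the caller supplies a preallocated `out` tensor
// and receives it back by reference. The actual semantics (e.g. out = a + b + c) are
// defined by whichever kernel is registered for this custom operator.
//
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({2, 3});
//   at::Tensor c = at::rand({2, 3});
//   at::Tensor out = at::empty({2, 3});
//   custom::native::add_3_out(a, b, c, out);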