1 | // clang-format off |
---|---|
2 | // Generated code for registering custom operators into the dispatcher. |
3 | |
4 | #include <torch/library.h> |
5 | #include <ATen/Tensor.h> |
6 | |
7 | #include "CustomOpsNativeFunctions.h" |
8 | |
9 | namespace torch { |
10 | namespace executor { |
11 | namespace function { |
12 | |
13 | |
14 | namespace { |
15 | |
16 | at::Tensor & wrapper_CPU_out_add_3_out(const at::Tensor & a, const at::Tensor & b, const at::Tensor & c, at::Tensor & out) { |
17 | // No device check |
18 | |
19 | |
20 | // DeviceGuard omitted |
21 | return custom::native::add_3_out(a, b, c, out); |
22 | } |
23 | |
24 | } // anonymous namespace |
25 | |
26 | // All out variants ops |
27 | |
28 | TORCH_LIBRARY_IMPL(custom, CPU, m) { |
29 | m.impl("add_3.out", |
30 | TORCH_FN(wrapper_CPU_out_add_3_out)); |
31 | |
32 | }; |
33 | |
namespace cpu
{

// Intentionally empty: the code generator emits this namespace as a
// placeholder, and no cpu-namespaced symbols were produced for these ops.

} // namespace cpu
39 | |
40 | } // namespace function |
41 | } // namespace executor |
42 | } // namespace torch |
43 |