1 | #pragma once |
2 | |
3 | // @generated by torchgen/gen.py from Operator.h |
4 | |
5 | #include <tuple> |
6 | #include <vector> |
7 | |
8 | // Forward declarations of any types needed in the operator signatures. |
9 | // We can't directly include these classes because it will cause circular include dependencies. |
10 | // This file is included by TensorBody.h, which defines the Tensor class. |
11 | #include <ATen/core/ATen_fwd.h> |
12 | |
13 | namespace at { |
14 | namespace _ops { |
15 | |
16 | |
// Dispatcher "operator struct" for aten::convolution_overrideable (functional variant).
// Bundles the operator's compile-time metadata (name, overload name, schema string)
// with two static entry points:
//   - call():       the normal entry point used by the generated at:: API.
//   - redispatch(): same signature plus an explicit DispatchKeySet, used to
//                   continue dispatch from a given key set (e.g. from a fallback
//                   or a backend that re-enters the dispatcher).
// Definitions live in the corresponding generated Operators_*.cpp file.
struct TORCH_API convolution_overrideable {
  // C++ signature matching the schema below; used to type the operator handle.
  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::convolution_overrideable" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor" )
  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
};
27 | |
// Dispatcher "operator struct" for the out= overload,
// aten::convolution_overrideable.out. Identical metadata layout to the
// functional variant above, but the schema takes a trailing mutable `out`
// tensor (aliased as Tensor(a!) in the schema string) and returns a reference
// to it. Definitions live in the corresponding generated Operators_*.cpp file.
struct TORCH_API convolution_overrideable_out {
  // C++ signature matching the schema below; note the trailing `at::Tensor &`
  // out-parameter and the reference return.
  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::convolution_overrideable" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)" )
  static at::Tensor & call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out);
};
38 | |
39 | }} // namespace at::_ops |
40 | |