1 | #pragma once |
2 | |
3 | // @generated by torchgen/gen.py from Operator.h |
4 | |
5 | #include <tuple> |
6 | #include <vector> |
7 | |
8 | // Forward declarations of any types needed in the operator signatures. |
9 | // We can't directly include these classes because it will cause circular include dependencies. |
10 | // This file is included by TensorBody.h, which defines the Tensor class. |
11 | #include <ATen/core/ATen_fwd.h> |
12 | |
13 | namespace at { |
14 | namespace _ops { |
15 | |
16 | |
17 | struct TORCH_API rnn_tanh_input { |
18 | using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool, bool); |
19 | using ptr_schema = schema*; |
20 | // See Note [static constexpr char* members for windows NVCC] |
21 | STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_tanh" ) |
22 | STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "input" ) |
23 | STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)" ) |
24 | static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); |
25 | static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); |
26 | }; |
27 | |
28 | struct TORCH_API rnn_tanh_data { |
29 | using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool); |
30 | using ptr_schema = schema*; |
31 | // See Note [static constexpr char* members for windows NVCC] |
32 | STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_tanh" ) |
33 | STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "data" ) |
34 | STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)" ) |
35 | static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); |
36 | static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); |
37 | }; |
38 | |
39 | }} // namespace at::_ops |
40 | |