#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace cuda {

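// Backward of the fused cuDNN RNN kernel (RNN/LSTM/GRU, selected by `mode`).
// Both overloads return (grad_input, grad_hx, grad_cx, grad_weights);
// `output_mask` selects which of those four gradients are actually computed.
// The `_symint` overload takes c10::SymInt sizes so the op can be traced
// with symbolic (dynamic) shapes.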
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);

} // namespace cuda
} // namespace at

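// Illustrative sketch (not part of the generated file): the usual call
// pattern pairs these declarations with the at::_cudnn_rnn forward op,
// whose `reserve` and `weight_buf` outputs are fed back into the backward.
// Argument names/values below are placeholders, and the forward's argument
// order is an assumption based on the ATen _cudnn_rnn op, not something
// this header guarantees.
//
//   auto [output, hy, cy, reserve, weight_buf] = at::_cudnn_rnn(
//       input, weights, weight_stride0, /*weight_buf=*/c10::nullopt, hx, cx,
//       mode, hidden_size, proj_size, num_layers, batch_first, dropout,
//       /*train=*/true, bidirectional, batch_sizes, dropout_state);
//
//   auto [dx, dhx, dcx, dw] = at::cuda::_cudnn_rnn_backward(
//       input, weights, weight_stride0, weight_buf, hx, cx, output,
//       grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size,
//       num_layers, batch_first, dropout, /*train=*/true, bidirectional,
//       batch_sizes, dropout_state, reserve,
//       /*output_mask=*/{true, true, true, true});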