#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>


namespace at {
namespace native {
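// CUDA flash-attention backward for scaled dot-product attention.
// Consumes the upstream gradient together with the tensors saved by the
// forward pass (out, logsumexp, the cumulative sequence-length tensors
// cum_seq_q/cum_seq_k, and the philox RNG state used to regenerate the
// dropout mask) and returns the gradients with respect to query, key,
// and value, in that order.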
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward_cuda(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset);
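// A minimal call sketch, assuming tensors saved from the matching forward
// pass; the variable names below are illustrative, not part of this header:
//
//   auto [grad_q, grad_k, grad_v] =
//       at::native::_scaled_dot_product_flash_attention_backward_cuda(
//           grad_out, query, key, value, out, logsumexp,
//           cum_seq_q, cum_seq_k, max_q, max_k,
//           /*dropout_p=*/0.0, /*is_causal=*/false,
//           philox_seed, philox_offset);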
} // namespace native
} // namespace at