1#pragma once
2// @generated by torchgen/gen.py from DispatchKeyFunction.h
3
4// NB: The implementing C++ file is RegisterDispatchKey.cpp
5
6// The only #includes we need are for custom classes that have defaults in the C++ API
7#include <c10/core/MemoryFormat.h>
8#include <c10/core/Scalar.h>
9#include <ATen/core/Reduction.h>
10
11// Forward declarations of any types needed in the operator signatures.
12// We can't directly include these classes because it will cause circular include dependencies.
13// This file is included by TensorBody.h, which defines the Tensor class.
14#include <ATen/core/ATen_fwd.h>
15
namespace at {

namespace cuda {

// CUDA-dispatch-key entry point; the definition is emitted into the generated
// RegisterDispatchKey.cpp (see the header comment at the top of this file).
//
// NOTE(review): this is only a declaration — semantics below are inferred from
// parameter names and should be confirmed against the registered kernel.
//   query, key, value    : attention operands.
//   cu_seqlens_q/_k      : optional tensors; presumably cumulative sequence
//                          lengths for variable-length (packed) batches —
//                          TODO confirm against the kernel implementation.
//   max_seqlen_q         : optional scalar bound; presumably the maximum query
//                          sequence length when cu_seqlens are supplied.
//   compute_log_sumexp   : defaults to false; presumably controls whether the
//                          second returned tensor (log-sum-exp) is populated.
//   causal               : defaults to false; presumably enables causal masking.
// Returns a pair of Tensors; the first is presumably the attention output.
TORCH_API ::std::tuple<at::Tensor,at::Tensor> _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, bool compute_log_sumexp=false, bool causal=false);

} // namespace cuda
} // namespace at
24