#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>
15
16namespace at {
17
18namespace compositeexplicitautograd {
19
20TORCH_API at::Tensor & _fake_quantize_learnable_per_channel_affine_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0);
21TORCH_API at::Tensor & _fake_quantize_learnable_per_channel_affine_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out);
22
23} // namespace compositeexplicitautograd
24} // namespace at
25