#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>


namespace at {
namespace native {
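// Backward kernels for native_layer_norm. Each takes the upstream gradient,
// the original input, the saved mean and rstd from the forward pass, and the
// optional weight and bias; output_mask selects which of (grad_input,
// grad_weight, grad_bias) to materialize. The _out_symint variant writes its
// results into the provided out0/out1/out2 tensors.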
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> layer_norm_backward_cpu(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> layer_norm_backward_cuda(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
} // namespace native
} // namespace at
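// A minimal usage sketch (illustrative only, not part of the generated
// header), assuming a CPU build: it runs the layer-norm forward to obtain
// the saved mean/rstd, then calls the CPU backward kernel declared above.
// Typical callers go through the dispatcher (at::native_layer_norm_backward)
// rather than invoking a per-device kernel by name.
//
//   #include <ATen/ATen.h>
//
//   at::Tensor input = at::randn({8, 16});        // [batch, features]
//   std::vector<int64_t> normalized_shape = {16}; // normalize the last dim
//   at::Tensor weight = at::ones({16});
//   at::Tensor bias = at::zeros({16});
//
//   // The forward pass returns the mean and rstd that backward consumes.
//   auto [out, mean, rstd] = at::native_layer_norm(
//       input, normalized_shape, weight, bias, /*eps=*/1e-5);
//
//   at::Tensor grad_out = at::ones_like(out);
//   // Request all three gradients: grad_input, grad_weight, grad_bias.
//   std::array<bool, 3> output_mask = {true, true, true};
//   auto [grad_input, grad_weight, grad_bias] =
//       at::native::layer_norm_backward_cpu(
//           grad_out, input, normalized_shape, mean, rstd,
//           weight, bias, output_mask);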