#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>



#include <ATen/ops/adaptive_max_pool2d_backward_ops.h>

namespace at {

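// This header exposes three wrappers for aten::adaptive_max_pool2d_backward:
// a functional variant that allocates its result, and two out variants that
// write into a caller-provided `grad_input`. The out variants differ only in
// argument order: `_out` takes the output tensor first (the usual C++-side
// convention), while `_outf` keeps it last, matching the schema order shown
// in the comments below.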
// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    return at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output, self, indices, grad_input);
}
// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    return at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output, self, indices, grad_input);
}

// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
inline at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices);
}
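
// Usage sketch (illustrative only, not part of the generated surface; the
// input shape and the {4, 4} output size are arbitrary assumptions). The
// forward op at::adaptive_max_pool2d returns both the pooled output and the
// indices tensor that this backward expects.
//
//   at::Tensor input = at::randn({1, 3, 8, 8});
//   auto [output, indices] = at::adaptive_max_pool2d(input, {4, 4});
//   at::Tensor grad_output = at::ones_like(output);
//
//   // Functional variant: allocates and returns grad_input.
//   at::Tensor grad_input = at::adaptive_max_pool2d_backward(grad_output, input, indices);
//
//   // Out variants: write into a preallocated buffer instead.
//   at::Tensor buffer = at::empty_like(input);
//   at::adaptive_max_pool2d_backward_out(buffer, grad_output, input, indices);
//   at::adaptive_max_pool2d_backward_outf(grad_output, input, indices, buffer);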

}