#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>


#include <ATen/ops/multi_margin_loss_backward_ops.h>

namespace at {

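// Inline convenience wrappers for the aten::multi_margin_loss_backward
// operator. Each wrapper forwards its arguments to the dispatcher entry
// points declared in <ATen/ops/multi_margin_loss_backward_ops.h>.
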
// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
    return at::_ops::multi_margin_loss_backward_grad_input::call(grad_output, self, target, p, margin, weight, reduction, grad_input);
}
// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & multi_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
    return at::_ops::multi_margin_loss_backward_grad_input::call(grad_output, self, target, p, margin, weight, reduction, grad_input);
}
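
// Example (illustrative sketch, not part of the generated header). The
// `_out` wrapper takes the output tensor first and supplies the schema
// defaults; `_outf` takes the full argument list in schema order with
// `grad_input` last. Both forward to the same dispatcher entry. Tensor
// names below are hypothetical:
//
//   at::Tensor grad_input = at::empty_like(self);
//   at::multi_margin_loss_backward_out(
//       grad_input, grad_output, self, target, /*p=*/1, /*margin=*/1.0);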

// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
inline at::Tensor multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
    return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction);
}
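
// Example (illustrative sketch, not part of the generated header):
// computing the gradient of multi_margin_loss with respect to `self`.
// Shapes are assumptions: `self` is (N, C) scores, `target` holds class
// indices in [0, C), and `grad_output` matches the reduced loss (a 0-dim
// tensor under the default Mean reduction):
//
//   at::Tensor self = at::randn({4, 3});
//   at::Tensor target = at::randint(/*low=*/0, /*high=*/3, {4});
//   at::Tensor grad_output = at::ones({});
//   at::Tensor grad_input = at::multi_margin_loss_backward(
//       grad_output, self, target, /*p=*/1, /*margin=*/1.0);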

} // namespace at