#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace compositeexplicitautograd {

TORCH_API at::Tensor normal_functional(const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt);
TORCH_API at::Tensor & normal_out(at::Tensor & out, const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt);
TORCH_API at::Tensor & normal_outf(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out);
TORCH_API at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={});
TORCH_API at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
TORCH_API at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={});
TORCH_API at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
TORCH_API at::Tensor & normal_out(at::Tensor & out, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt);
TORCH_API at::Tensor & normal_outf(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
TORCH_API at::Tensor & normal_symint_out(at::Tensor & out, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt);
TORCH_API at::Tensor & normal_symint_outf(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);

} // namespace compositeexplicitautograd
} // namespace at
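// Editorial usage sketch (not emitted by torchgen): the declarations above are
// the CompositeExplicitAutograd entry points for aten::normal. A caller that
// includes this header (typically indirectly, via <ATen/ATen.h>) could exercise
// them roughly as follows; the shapes, mean, and std values below are
// illustrative assumptions, not part of this file. Note that calling into this
// namespace directly bypasses the dispatcher.
//
//   at::Tensor self = at::zeros({2, 3});
//
//   // Functional variant: returns a new tensor shaped like `self`, filled with
//   // samples from N(mean=0, std=1) using the default generator.
//   at::Tensor a = at::compositeexplicitautograd::normal_functional(self);
//
//   // Factory variant: allocates a {2, 3} tensor and draws samples from N(5.0, 2.0).
//   at::Tensor b = at::compositeexplicitautograd::normal(
//       /*mean=*/5.0, /*std=*/2.0, /*size=*/{2, 3});
//
//   // Out variant: writes samples into a preallocated tensor.
//   at::Tensor out = at::empty({2, 3});
//   at::compositeexplicitautograd::normal_out(
//       out, /*mean=*/0.0, /*std=*/1.0, /*size=*/{2, 3});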