1 | #pragma once |
2 | |
3 | // @generated by torchgen/gen.py from Operator.h |
4 | |
5 | #include <tuple> |
6 | #include <vector> |
7 | |
8 | // Forward declarations of any types needed in the operator signatures. |
9 | // We can't directly include these classes because it will cause circular include dependencies. |
10 | // This file is included by TensorBody.h, which defines the Tensor class. |
11 | #include <ATen/core/ATen_fwd.h> |
12 | |
13 | namespace at { |
14 | namespace _ops { |
15 | |
16 | |
// Typed entry point for the dispatcher operator "aten::rand" / overload "names":
// rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None,
//            Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// The trailing dtype/layout/device/pin_memory quartet is the usual factory
// TensorOptions group (all default to None per schema_str above).
struct TORCH_API rand_names {
  // Exact C++ signature of the operator, usable as a function type.
  using schema = at::Tensor (c10::SymIntArrayRef, c10::optional<at::DimnameList>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" )
  // Invokes the operator (presumably via the dispatcher; defined in generated .cpp).
  static at::Tensor call(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
  // Same call but with an explicit DispatchKeySet, for re-dispatching from within a kernel.
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
};
27 | |
// Typed entry point for "aten::rand" / overload "generator_with_names":
// like rand.names but additionally takes an optional Generator that
// precedes the names argument (see schema_str below for the exact order).
struct TORCH_API rand_generator_with_names {
  // Exact C++ signature of the operator, usable as a function type.
  using schema = at::Tensor (c10::SymIntArrayRef, c10::optional<at::Generator>, c10::optional<at::DimnameList>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator_with_names" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" )
  // Invokes the operator (defined in generated .cpp).
  static at::Tensor call(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
  // Same call but with an explicit DispatchKeySet, for re-dispatching from within a kernel.
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
};
38 | |
// Typed entry point for the base "aten::rand" operator (empty overload name):
// rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None,
//      Device? device=None, bool? pin_memory=None) -> Tensor
struct TORCH_API rand {
  // Exact C++ signature of the operator, usable as a function type.
  using schema = at::Tensor (c10::SymIntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" )
  // Invokes the operator (defined in generated .cpp).
  static at::Tensor call(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
  // Same call but with an explicit DispatchKeySet, for re-dispatching from within a kernel.
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
};
49 | |
// Typed entry point for "aten::rand" / overload "generator":
// the base rand signature plus an optional Generator inserted after size.
struct TORCH_API rand_generator {
  // Exact C++ signature of the operator, usable as a function type.
  using schema = at::Tensor (c10::SymIntArrayRef, c10::optional<at::Generator>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" )
  // Invokes the operator (defined in generated .cpp).
  static at::Tensor call(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
  // Same call but with an explicit DispatchKeySet, for re-dispatching from within a kernel.
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
};
60 | |
// Typed entry point for "aten::rand" / overload "out":
// rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// The "(a!)" annotation in schema_str marks `out` as mutated in place;
// the returned reference aliases it.
struct TORCH_API rand_out {
  // Exact C++ signature of the operator, usable as a function type.
  using schema = at::Tensor & (c10::SymIntArrayRef, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" )
  // Invokes the operator (defined in generated .cpp).
  static at::Tensor & call(c10::SymIntArrayRef size, at::Tensor & out);
  // Same call but with an explicit DispatchKeySet, for re-dispatching from within a kernel.
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out);
};
71 | |
// Typed entry point for "aten::rand" / overload "generator_out":
// out-variant that also takes an optional Generator. `out` is mutated in
// place ("(a!)" in schema_str) and the returned reference aliases it.
struct TORCH_API rand_generator_out {
  // Exact C++ signature of the operator, usable as a function type.
  using schema = at::Tensor & (c10::SymIntArrayRef, c10::optional<at::Generator>, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator_out" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)" )
  // Invokes the operator (defined in generated .cpp).
  static at::Tensor & call(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
  // Same call but with an explicit DispatchKeySet, for re-dispatching from within a kernel.
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
};
82 | |
// Typed entry point for "aten::rand" / overload "names_out":
// out-variant of rand.names. `out` is mutated in place ("(a!)" in
// schema_str) and the returned reference aliases it.
struct TORCH_API rand_names_out {
  // Exact C++ signature of the operator, usable as a function type.
  using schema = at::Tensor & (c10::SymIntArrayRef, c10::optional<at::DimnameList>, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)" )
  // Invokes the operator (defined in generated .cpp).
  static at::Tensor & call(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out);
  // Same call but with an explicit DispatchKeySet, for re-dispatching from within a kernel.
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out);
};
93 | |
// Typed entry point for "aten::rand" / overload "generator_with_names_out":
// out-variant taking both an optional Generator and optional dimension
// names. `out` is mutated in place ("(a!)" in schema_str) and the returned
// reference aliases it.
struct TORCH_API rand_generator_with_names_out {
  // Exact C++ signature of the operator, usable as a function type.
  using schema = at::Tensor & (c10::SymIntArrayRef, c10::optional<at::Generator>, c10::optional<at::DimnameList>, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator_with_names_out" )
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)" )
  // Invokes the operator (defined in generated .cpp).
  static at::Tensor & call(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out);
  // Same call but with an explicit DispatchKeySet, for re-dispatching from within a kernel.
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out);
};
104 | |
105 | }} // namespace at::_ops |
106 | |