1 | // We register ops with a higher priority dispatch key (BackendSelect) than the usual backend-specific keys (e.g. CPU) |
2 | // which makes calls to the factory functions dispatch to here. |
// We then 'manually' compute a lower-priority dispatch key to re-dispatch to (e.g. CPU) to get to the eventually correct backend.
4 | // @generated by torchgen/gen.py from RegisterBackendSelect.cpp |
5 | |
6 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
7 | #include <ATen/core/Tensor.h> |
8 | #include <ATen/core/dispatch/DispatchKeyExtractor.h> |
9 | #include <torch/library.h> |
10 | |
11 | #ifndef AT_PER_OPERATOR_HEADERS |
12 | #include <ATen/Operators.h> |
13 | #else |
14 | #include <ATen/ops/is_pinned_ops.h> |
15 | #include <ATen/ops/_pin_memory_ops.h> |
16 | |
17 | #include <ATen/ops/_cudnn_init_dropout_state_ops.h> |
18 | #include <ATen/ops/arange_ops.h> |
19 | #include <ATen/ops/arange_ops.h> |
20 | #include <ATen/ops/arange_ops.h> |
21 | #include <ATen/ops/bartlett_window_ops.h> |
22 | #include <ATen/ops/bartlett_window_ops.h> |
23 | #include <ATen/ops/blackman_window_ops.h> |
24 | #include <ATen/ops/blackman_window_ops.h> |
25 | #include <ATen/ops/empty_ops.h> |
26 | #include <ATen/ops/empty_ops.h> |
27 | #include <ATen/ops/_empty_affine_quantized_ops.h> |
28 | #include <ATen/ops/_empty_per_channel_affine_quantized_ops.h> |
29 | #include <ATen/ops/empty_quantized_ops.h> |
30 | #include <ATen/ops/empty_strided_ops.h> |
31 | #include <ATen/ops/eye_ops.h> |
32 | #include <ATen/ops/eye_ops.h> |
33 | #include <ATen/ops/full_ops.h> |
34 | #include <ATen/ops/full_ops.h> |
35 | #include <ATen/ops/from_file_ops.h> |
36 | #include <ATen/ops/hann_window_ops.h> |
37 | #include <ATen/ops/hann_window_ops.h> |
38 | #include <ATen/ops/hamming_window_ops.h> |
39 | #include <ATen/ops/hamming_window_ops.h> |
40 | #include <ATen/ops/hamming_window_ops.h> |
41 | #include <ATen/ops/hamming_window_ops.h> |
42 | #include <ATen/ops/kaiser_window_ops.h> |
43 | #include <ATen/ops/kaiser_window_ops.h> |
44 | #include <ATen/ops/kaiser_window_ops.h> |
45 | #include <ATen/ops/linspace_ops.h> |
46 | #include <ATen/ops/logspace_ops.h> |
47 | #include <ATen/ops/ones_ops.h> |
48 | #include <ATen/ops/ones_ops.h> |
49 | #include <ATen/ops/scalar_tensor_ops.h> |
50 | #include <ATen/ops/rand_ops.h> |
51 | #include <ATen/ops/rand_ops.h> |
52 | #include <ATen/ops/rand_ops.h> |
53 | #include <ATen/ops/rand_ops.h> |
54 | #include <ATen/ops/randint_ops.h> |
55 | #include <ATen/ops/randint_ops.h> |
56 | #include <ATen/ops/randint_ops.h> |
57 | #include <ATen/ops/randint_ops.h> |
58 | #include <ATen/ops/randn_ops.h> |
59 | #include <ATen/ops/randn_ops.h> |
60 | #include <ATen/ops/randn_ops.h> |
61 | #include <ATen/ops/randn_ops.h> |
62 | #include <ATen/ops/randperm_ops.h> |
63 | #include <ATen/ops/randperm_ops.h> |
64 | #include <ATen/ops/range_ops.h> |
65 | #include <ATen/ops/range_ops.h> |
66 | #include <ATen/ops/zeros_ops.h> |
67 | #include <ATen/ops/_efficientzerotensor_ops.h> |
68 | #include <ATen/ops/zeros_ops.h> |
69 | #include <ATen/ops/sparse_compressed_tensor_ops.h> |
70 | #include <ATen/ops/sparse_csr_tensor_ops.h> |
71 | #include <ATen/ops/sparse_csc_tensor_ops.h> |
72 | #include <ATen/ops/sparse_bsr_tensor_ops.h> |
73 | #include <ATen/ops/sparse_bsc_tensor_ops.h> |
74 | #include <ATen/ops/sparse_compressed_tensor_ops.h> |
75 | #include <ATen/ops/sparse_csr_tensor_ops.h> |
76 | #include <ATen/ops/sparse_csc_tensor_ops.h> |
77 | #include <ATen/ops/sparse_bsr_tensor_ops.h> |
78 | #include <ATen/ops/sparse_bsc_tensor_ops.h> |
79 | #include <ATen/ops/_sparse_compressed_tensor_unsafe_ops.h> |
80 | #include <ATen/ops/_sparse_csr_tensor_unsafe_ops.h> |
81 | #include <ATen/ops/_sparse_csc_tensor_unsafe_ops.h> |
82 | #include <ATen/ops/_sparse_bsr_tensor_unsafe_ops.h> |
83 | #include <ATen/ops/_sparse_bsc_tensor_unsafe_ops.h> |
84 | #include <ATen/ops/sparse_coo_tensor_ops.h> |
85 | #include <ATen/ops/sparse_coo_tensor_ops.h> |
86 | #include <ATen/ops/sparse_coo_tensor_ops.h> |
87 | #include <ATen/ops/_sparse_coo_tensor_unsafe_ops.h> |
88 | #include <ATen/ops/_sparse_coo_tensor_with_dims_ops.h> |
89 | #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h> |
90 | #include <ATen/ops/_to_copy_ops.h> |
91 | #include <ATen/ops/to_ops.h> |
92 | #include <ATen/ops/tril_indices_ops.h> |
93 | #include <ATen/ops/triu_indices_ops.h> |
94 | #include <ATen/ops/normal_ops.h> |
95 | #include <ATen/ops/fft_fftfreq_ops.h> |
96 | #include <ATen/ops/fft_rfftfreq_ops.h> |
97 | #endif |
98 | |
99 | namespace at { |
100 | |
101 | namespace { |
102 | |
103 | // aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
104 | C10_ALWAYS_INLINE |
105 | at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
106 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
107 | return at::_ops::_cudnn_init_dropout_state::redispatch( |
108 | _dk, dropout, train, dropout_seed, dtype, layout, device, pin_memory); |
109 | } |
110 | // aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
111 | C10_ALWAYS_INLINE |
112 | at::Tensor arange(const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
113 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
114 | return at::_ops::arange::redispatch( |
115 | _dk, end, dtype, layout, device, pin_memory); |
116 | } |
117 | // aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
118 | C10_ALWAYS_INLINE |
119 | at::Tensor arange_start(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
120 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
121 | return at::_ops::arange_start::redispatch( |
122 | _dk, start, end, dtype, layout, device, pin_memory); |
123 | } |
124 | // aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
125 | C10_ALWAYS_INLINE |
126 | at::Tensor arange_start_step(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
127 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
128 | return at::_ops::arange_start_step::redispatch( |
129 | _dk, start, end, step, dtype, layout, device, pin_memory); |
130 | } |
131 | // aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
132 | C10_ALWAYS_INLINE |
133 | at::Tensor bartlett_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
134 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
135 | return at::_ops::bartlett_window::redispatch( |
136 | _dk, window_length, dtype, layout, device, pin_memory); |
137 | } |
138 | // aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
139 | C10_ALWAYS_INLINE |
140 | at::Tensor bartlett_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
141 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
142 | return at::_ops::bartlett_window_periodic::redispatch( |
143 | _dk, window_length, periodic, dtype, layout, device, pin_memory); |
144 | } |
145 | // aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
146 | C10_ALWAYS_INLINE |
147 | at::Tensor blackman_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
148 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
149 | return at::_ops::blackman_window::redispatch( |
150 | _dk, window_length, dtype, layout, device, pin_memory); |
151 | } |
152 | // aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
153 | C10_ALWAYS_INLINE |
154 | at::Tensor blackman_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
155 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
156 | return at::_ops::blackman_window_periodic::redispatch( |
157 | _dk, window_length, periodic, dtype, layout, device, pin_memory); |
158 | } |
159 | // aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor |
160 | C10_ALWAYS_INLINE |
161 | at::Tensor empty_names(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
162 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
163 | return at::_ops::empty_names::redispatch( |
164 | _dk, size, names, dtype, layout, device, pin_memory, memory_format); |
165 | } |
166 | // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor |
167 | C10_ALWAYS_INLINE |
168 | at::Tensor empty_memory_format(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
169 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
170 | return at::_ops::empty_memory_format::redispatch( |
171 | _dk, size, dtype, layout, device, pin_memory, memory_format); |
172 | } |
173 | // aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor |
174 | C10_ALWAYS_INLINE |
175 | at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) { |
176 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
177 | return at::_ops::_empty_affine_quantized::redispatch( |
178 | _dk, size, dtype, layout, device, pin_memory, scale, zero_point, memory_format); |
179 | } |
// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
C10_ALWAYS_INLINE
at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // Union the key implied by the TensorOptions arguments with the keys carried
  // by the tensor arguments (scales, zero_points).
  DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(scales, zero_points);
  // Mask to the keys strictly after BackendSelect so the redispatch cannot
  // land back in this wrapper.
  DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
  DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
  return at::_ops::_empty_per_channel_affine_quantized::redispatch(
      _dk, size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}
// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
C10_ALWAYS_INLINE
at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // Union the key implied by the TensorOptions arguments with the keys carried
  // by the tensor argument (qtensor).
  DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(qtensor);
  // Mask to the keys strictly after BackendSelect so the redispatch cannot
  // land back in this wrapper.
  DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
  DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
  return at::_ops::empty_quantized::redispatch(
      _dk, size, qtensor, dtype, layout, device, pin_memory, memory_format);
}
198 | // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
199 | C10_ALWAYS_INLINE |
200 | at::Tensor empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
201 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
202 | return at::_ops::empty_strided::redispatch( |
203 | _dk, size, stride, dtype, layout, device, pin_memory); |
204 | } |
205 | // aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
206 | C10_ALWAYS_INLINE |
207 | at::Tensor eye(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
208 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
209 | return at::_ops::eye::redispatch( |
210 | _dk, n, dtype, layout, device, pin_memory); |
211 | } |
212 | // aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
213 | C10_ALWAYS_INLINE |
214 | at::Tensor eye_m(int64_t n, int64_t m, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
215 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
216 | return at::_ops::eye_m::redispatch( |
217 | _dk, n, m, dtype, layout, device, pin_memory); |
218 | } |
219 | // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
220 | C10_ALWAYS_INLINE |
221 | at::Tensor full_names(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
222 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
223 | return at::_ops::full_names::redispatch( |
224 | _dk, size, fill_value, names, dtype, layout, device, pin_memory); |
225 | } |
226 | // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
227 | C10_ALWAYS_INLINE |
228 | at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
229 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
230 | return at::_ops::full::redispatch( |
231 | _dk, size, fill_value, dtype, layout, device, pin_memory); |
232 | } |
233 | // aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
234 | C10_ALWAYS_INLINE |
235 | at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
236 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
237 | return at::_ops::from_file::redispatch( |
238 | _dk, filename, shared, size, dtype, layout, device, pin_memory); |
239 | } |
240 | // aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
241 | C10_ALWAYS_INLINE |
242 | at::Tensor hann_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
243 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
244 | return at::_ops::hann_window::redispatch( |
245 | _dk, window_length, dtype, layout, device, pin_memory); |
246 | } |
247 | // aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
248 | C10_ALWAYS_INLINE |
249 | at::Tensor hann_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
250 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
251 | return at::_ops::hann_window_periodic::redispatch( |
252 | _dk, window_length, periodic, dtype, layout, device, pin_memory); |
253 | } |
254 | // aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
255 | C10_ALWAYS_INLINE |
256 | at::Tensor hamming_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
257 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
258 | return at::_ops::hamming_window::redispatch( |
259 | _dk, window_length, dtype, layout, device, pin_memory); |
260 | } |
261 | // aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
262 | C10_ALWAYS_INLINE |
263 | at::Tensor hamming_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
264 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
265 | return at::_ops::hamming_window_periodic::redispatch( |
266 | _dk, window_length, periodic, dtype, layout, device, pin_memory); |
267 | } |
268 | // aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
269 | C10_ALWAYS_INLINE |
270 | at::Tensor hamming_window_periodic_alpha(int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
271 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
272 | return at::_ops::hamming_window_periodic_alpha::redispatch( |
273 | _dk, window_length, periodic, alpha, dtype, layout, device, pin_memory); |
274 | } |
275 | // aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
276 | C10_ALWAYS_INLINE |
277 | at::Tensor hamming_window_periodic_alpha_beta(int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
278 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
279 | return at::_ops::hamming_window_periodic_alpha_beta::redispatch( |
280 | _dk, window_length, periodic, alpha, beta, dtype, layout, device, pin_memory); |
281 | } |
282 | // aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
283 | C10_ALWAYS_INLINE |
284 | at::Tensor kaiser_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
285 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
286 | return at::_ops::kaiser_window::redispatch( |
287 | _dk, window_length, dtype, layout, device, pin_memory); |
288 | } |
289 | // aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
290 | C10_ALWAYS_INLINE |
291 | at::Tensor kaiser_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
292 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
293 | return at::_ops::kaiser_window_periodic::redispatch( |
294 | _dk, window_length, periodic, dtype, layout, device, pin_memory); |
295 | } |
296 | // aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
297 | C10_ALWAYS_INLINE |
298 | at::Tensor kaiser_window_beta(int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
299 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
300 | return at::_ops::kaiser_window_beta::redispatch( |
301 | _dk, window_length, periodic, beta, dtype, layout, device, pin_memory); |
302 | } |
303 | // aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
304 | C10_ALWAYS_INLINE |
305 | at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
306 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
307 | return at::_ops::linspace::redispatch( |
308 | _dk, start, end, steps, dtype, layout, device, pin_memory); |
309 | } |
310 | // aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
311 | C10_ALWAYS_INLINE |
312 | at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
313 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
314 | return at::_ops::logspace::redispatch( |
315 | _dk, start, end, steps, base, dtype, layout, device, pin_memory); |
316 | } |
317 | // aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
318 | C10_ALWAYS_INLINE |
319 | at::Tensor ones_names(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
320 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
321 | return at::_ops::ones_names::redispatch( |
322 | _dk, size, names, dtype, layout, device, pin_memory); |
323 | } |
324 | // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
325 | C10_ALWAYS_INLINE |
326 | at::Tensor ones(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
327 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
328 | return at::_ops::ones::redispatch( |
329 | _dk, size, dtype, layout, device, pin_memory); |
330 | } |
331 | // aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
332 | C10_ALWAYS_INLINE |
333 | at::Tensor scalar_tensor(const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
334 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
335 | return at::_ops::scalar_tensor::redispatch( |
336 | _dk, s, dtype, layout, device, pin_memory); |
337 | } |
338 | // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
339 | C10_ALWAYS_INLINE |
340 | at::Tensor rand_names(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
341 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
342 | return at::_ops::rand_names::redispatch( |
343 | _dk, size, names, dtype, layout, device, pin_memory); |
344 | } |
345 | // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
346 | C10_ALWAYS_INLINE |
347 | at::Tensor rand_generator_with_names(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
348 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
349 | return at::_ops::rand_generator_with_names::redispatch( |
350 | _dk, size, generator, names, dtype, layout, device, pin_memory); |
351 | } |
352 | // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
353 | C10_ALWAYS_INLINE |
354 | at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
355 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
356 | return at::_ops::rand::redispatch( |
357 | _dk, size, dtype, layout, device, pin_memory); |
358 | } |
359 | // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
360 | C10_ALWAYS_INLINE |
361 | at::Tensor rand_generator(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
362 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
363 | return at::_ops::rand_generator::redispatch( |
364 | _dk, size, generator, dtype, layout, device, pin_memory); |
365 | } |
366 | // aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
367 | C10_ALWAYS_INLINE |
368 | at::Tensor randint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
369 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
370 | return at::_ops::randint::redispatch( |
371 | _dk, high, size, dtype, layout, device, pin_memory); |
372 | } |
373 | // aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
374 | C10_ALWAYS_INLINE |
375 | at::Tensor randint_generator(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
376 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
377 | return at::_ops::randint_generator::redispatch( |
378 | _dk, high, size, generator, dtype, layout, device, pin_memory); |
379 | } |
380 | // aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
381 | C10_ALWAYS_INLINE |
382 | at::Tensor randint_low(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
383 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
384 | return at::_ops::randint_low::redispatch( |
385 | _dk, low, high, size, dtype, layout, device, pin_memory); |
386 | } |
387 | // aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
388 | C10_ALWAYS_INLINE |
389 | at::Tensor randint_low_generator(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
390 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
391 | return at::_ops::randint_low_generator::redispatch( |
392 | _dk, low, high, size, generator, dtype, layout, device, pin_memory); |
393 | } |
394 | // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
395 | C10_ALWAYS_INLINE |
396 | at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
397 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
398 | return at::_ops::randn::redispatch( |
399 | _dk, size, dtype, layout, device, pin_memory); |
400 | } |
401 | // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
402 | C10_ALWAYS_INLINE |
403 | at::Tensor randn_generator(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
404 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
405 | return at::_ops::randn_generator::redispatch( |
406 | _dk, size, generator, dtype, layout, device, pin_memory); |
407 | } |
408 | // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
409 | C10_ALWAYS_INLINE |
410 | at::Tensor randn_names(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
411 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
412 | return at::_ops::randn_names::redispatch( |
413 | _dk, size, names, dtype, layout, device, pin_memory); |
414 | } |
415 | // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
416 | C10_ALWAYS_INLINE |
417 | at::Tensor randn_generator_with_names(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
418 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
419 | return at::_ops::randn_generator_with_names::redispatch( |
420 | _dk, size, generator, names, dtype, layout, device, pin_memory); |
421 | } |
422 | // aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
423 | C10_ALWAYS_INLINE |
424 | at::Tensor randperm(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
425 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
426 | return at::_ops::randperm::redispatch( |
427 | _dk, n, dtype, layout, device, pin_memory); |
428 | } |
429 | // aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
430 | C10_ALWAYS_INLINE |
431 | at::Tensor randperm_generator(int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
432 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
433 | return at::_ops::randperm_generator::redispatch( |
434 | _dk, n, generator, dtype, layout, device, pin_memory); |
435 | } |
436 | // aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
437 | C10_ALWAYS_INLINE |
438 | at::Tensor range_step(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
439 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
440 | return at::_ops::range_step::redispatch( |
441 | _dk, start, end, step, dtype, layout, device, pin_memory); |
442 | } |
443 | // aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
444 | C10_ALWAYS_INLINE |
445 | at::Tensor range(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
446 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
447 | return at::_ops::range::redispatch( |
448 | _dk, start, end, dtype, layout, device, pin_memory); |
449 | } |
450 | // aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
451 | C10_ALWAYS_INLINE |
452 | at::Tensor zeros_names(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
453 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
454 | return at::_ops::zeros_names::redispatch( |
455 | _dk, size, names, dtype, layout, device, pin_memory); |
456 | } |
457 | // aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
458 | C10_ALWAYS_INLINE |
459 | at::Tensor _efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
460 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
461 | return at::_ops::_efficientzerotensor::redispatch( |
462 | _dk, size, dtype, layout, device, pin_memory); |
463 | } |
464 | // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
465 | C10_ALWAYS_INLINE |
466 | at::Tensor zeros(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
467 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
468 | return at::_ops::zeros::redispatch( |
469 | _dk, size, dtype, layout, device, pin_memory); |
470 | } |
471 | // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
472 | C10_ALWAYS_INLINE |
473 | at::Tensor sparse_compressed_tensor_comp_plain_value_size(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
474 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(compressed_indices, plain_indices, values); |
475 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
476 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
477 | return at::_ops::sparse_compressed_tensor_comp_plain_value_size::redispatch( |
478 | _dk, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); |
479 | } |
480 | // aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
481 | C10_ALWAYS_INLINE |
482 | at::Tensor sparse_csr_tensor_crow_col_value_size(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
483 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values); |
484 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
485 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
486 | return at::_ops::sparse_csr_tensor_crow_col_value_size::redispatch( |
487 | _dk, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); |
488 | } |
489 | // aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
490 | C10_ALWAYS_INLINE |
491 | at::Tensor sparse_csc_tensor_ccol_row_value_size(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
492 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values); |
493 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
494 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
495 | return at::_ops::sparse_csc_tensor_ccol_row_value_size::redispatch( |
496 | _dk, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); |
497 | } |
498 | // aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
499 | C10_ALWAYS_INLINE |
500 | at::Tensor sparse_bsr_tensor_crow_col_value_size(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
501 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values); |
502 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
503 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
504 | return at::_ops::sparse_bsr_tensor_crow_col_value_size::redispatch( |
505 | _dk, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); |
506 | } |
507 | // aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
508 | C10_ALWAYS_INLINE |
509 | at::Tensor sparse_bsc_tensor_ccol_row_value_size(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
510 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values); |
511 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
512 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
513 | return at::_ops::sparse_bsc_tensor_ccol_row_value_size::redispatch( |
514 | _dk, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); |
515 | } |
516 | // aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
517 | C10_ALWAYS_INLINE |
518 | at::Tensor sparse_compressed_tensor_comp_plain_value(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
519 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(compressed_indices, plain_indices, values); |
520 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
521 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
522 | return at::_ops::sparse_compressed_tensor_comp_plain_value::redispatch( |
523 | _dk, compressed_indices, plain_indices, values, dtype, layout, device, pin_memory); |
524 | } |
525 | // aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
526 | C10_ALWAYS_INLINE |
527 | at::Tensor sparse_csr_tensor_crow_col_value(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
528 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values); |
529 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
530 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
531 | return at::_ops::sparse_csr_tensor_crow_col_value::redispatch( |
532 | _dk, crow_indices, col_indices, values, dtype, layout, device, pin_memory); |
533 | } |
534 | // aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
535 | C10_ALWAYS_INLINE |
536 | at::Tensor sparse_csc_tensor_ccol_row_value(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
537 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values); |
538 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
539 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
540 | return at::_ops::sparse_csc_tensor_ccol_row_value::redispatch( |
541 | _dk, ccol_indices, row_indices, values, dtype, layout, device, pin_memory); |
542 | } |
543 | // aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
544 | C10_ALWAYS_INLINE |
545 | at::Tensor sparse_bsr_tensor_crow_col_value(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
546 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values); |
547 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
548 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
549 | return at::_ops::sparse_bsr_tensor_crow_col_value::redispatch( |
550 | _dk, crow_indices, col_indices, values, dtype, layout, device, pin_memory); |
551 | } |
552 | // aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
553 | C10_ALWAYS_INLINE |
554 | at::Tensor sparse_bsc_tensor_ccol_row_value(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
555 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values); |
556 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
557 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
558 | return at::_ops::sparse_bsc_tensor_ccol_row_value::redispatch( |
559 | _dk, ccol_indices, row_indices, values, dtype, layout, device, pin_memory); |
560 | } |
561 | // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
562 | C10_ALWAYS_INLINE |
563 | at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
564 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(compressed_indices, plain_indices, values); |
565 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
566 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
567 | return at::_ops::_sparse_compressed_tensor_unsafe::redispatch( |
568 | _dk, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); |
569 | } |
570 | // aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
571 | C10_ALWAYS_INLINE |
572 | at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
573 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values); |
574 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
575 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
576 | return at::_ops::_sparse_csr_tensor_unsafe::redispatch( |
577 | _dk, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); |
578 | } |
579 | // aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
580 | C10_ALWAYS_INLINE |
581 | at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
582 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values); |
583 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
584 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
585 | return at::_ops::_sparse_csc_tensor_unsafe::redispatch( |
586 | _dk, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); |
587 | } |
588 | // aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
589 | C10_ALWAYS_INLINE |
590 | at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
591 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values); |
592 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
593 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
594 | return at::_ops::_sparse_bsr_tensor_unsafe::redispatch( |
595 | _dk, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); |
596 | } |
597 | // aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
598 | C10_ALWAYS_INLINE |
599 | at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
600 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values); |
601 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
602 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
603 | return at::_ops::_sparse_bsc_tensor_unsafe::redispatch( |
604 | _dk, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); |
605 | } |
606 | // aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
607 | C10_ALWAYS_INLINE |
608 | at::Tensor sparse_coo_tensor_size(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
609 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
610 | return at::_ops::sparse_coo_tensor_size::redispatch( |
611 | _dk, size, dtype, layout, device, pin_memory); |
612 | } |
613 | // aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
614 | C10_ALWAYS_INLINE |
615 | at::Tensor sparse_coo_tensor_indices(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
616 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(indices, values); |
617 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
618 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
619 | return at::_ops::sparse_coo_tensor_indices::redispatch( |
620 | _dk, indices, values, dtype, layout, device, pin_memory); |
621 | } |
622 | // aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
623 | C10_ALWAYS_INLINE |
624 | at::Tensor sparse_coo_tensor_indices_size(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
625 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(indices, values); |
626 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
627 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
628 | return at::_ops::sparse_coo_tensor_indices_size::redispatch( |
629 | _dk, indices, values, size, dtype, layout, device, pin_memory); |
630 | } |
631 | // aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
632 | C10_ALWAYS_INLINE |
633 | at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
634 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(indices, values); |
635 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
636 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
637 | return at::_ops::_sparse_coo_tensor_unsafe::redispatch( |
638 | _dk, indices, values, size, dtype, layout, device, pin_memory); |
639 | } |
640 | // aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
641 | C10_ALWAYS_INLINE |
642 | at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
643 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
644 | return at::_ops::_sparse_coo_tensor_with_dims::redispatch( |
645 | _dk, sparse_dim, dense_dim, size, dtype, layout, device, pin_memory); |
646 | } |
647 | // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
648 | C10_ALWAYS_INLINE |
649 | at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
650 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(indices, values); |
651 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
652 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
653 | return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::redispatch( |
654 | _dk, sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory); |
655 | } |
656 | // aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor |
657 | C10_ALWAYS_INLINE |
658 | at::Tensor _to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) { |
659 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(self); |
660 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
661 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
662 | return at::_ops::_to_copy::redispatch( |
663 | _dk, self, dtype, layout, device, pin_memory, non_blocking, memory_format); |
664 | } |
665 | // aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) |
666 | C10_ALWAYS_INLINE |
667 | at::Tensor to_dtype_layout(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) { |
668 | DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(self); |
669 | DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect); |
670 | DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask); |
671 | return at::_ops::to_dtype_layout::redispatch( |
672 | _dk, self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format); |
673 | } |
674 | // aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
675 | C10_ALWAYS_INLINE |
676 | at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
677 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
678 | return at::_ops::tril_indices::redispatch( |
679 | _dk, row, col, offset, dtype, layout, device, pin_memory); |
680 | } |
681 | // aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
682 | C10_ALWAYS_INLINE |
683 | at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
684 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
685 | return at::_ops::triu_indices::redispatch( |
686 | _dk, row, col, offset, dtype, layout, device, pin_memory); |
687 | } |
688 | // aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
689 | C10_ALWAYS_INLINE |
690 | at::Tensor normal_float_float(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
691 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
692 | return at::_ops::normal_float_float::redispatch( |
693 | _dk, mean, std, size, generator, dtype, layout, device, pin_memory); |
694 | } |
695 | // aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
696 | C10_ALWAYS_INLINE |
697 | at::Tensor fft_fftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
698 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
699 | return at::_ops::fft_fftfreq::redispatch( |
700 | _dk, n, d, dtype, layout, device, pin_memory); |
701 | } |
702 | // aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
703 | C10_ALWAYS_INLINE |
704 | at::Tensor fft_rfftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
705 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)); |
706 | return at::_ops::fft_rfftfreq::redispatch( |
707 | _dk, n, d, dtype, layout, device, pin_memory); |
708 | } |
709 | |
710 | bool is_pinned(const Tensor& self, c10::optional<at::Device> device) { |
711 | // Only CPU tensors can be pinned |
712 | if (!self.is_cpu()) { |
713 | return false; |
714 | } |
715 | // TODO: fetch scalar type from Tensor? But it doesn't really matter... |
716 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA))); |
717 | return at::_ops::is_pinned::redispatch(_dk, self, device); |
718 | } |
719 | |
720 | at::Tensor _pin_memory(const Tensor& self, c10::optional<at::Device> device) { |
721 | TORCH_CHECK(self.device().is_cpu(), "cannot pin '" , self.toString(), "' only dense CPU tensors can be pinned" ); |
722 | DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA))); |
723 | return at::_ops::_pin_memory::redispatch(_dk, self, device); |
724 | } |
725 | |
726 | TORCH_LIBRARY_IMPL(aten, BackendSelect, m) { |
727 | m.impl("aten::_cudnn_init_dropout_state" , TORCH_FN(_cudnn_init_dropout_state)); |
728 | m.impl("aten::arange" , TORCH_FN(arange)); |
729 | m.impl("aten::arange.start" , TORCH_FN(arange_start)); |
730 | m.impl("aten::arange.start_step" , TORCH_FN(arange_start_step)); |
731 | m.impl("aten::bartlett_window" , TORCH_FN(bartlett_window)); |
732 | m.impl("aten::bartlett_window.periodic" , TORCH_FN(bartlett_window_periodic)); |
733 | m.impl("aten::blackman_window" , TORCH_FN(blackman_window)); |
734 | m.impl("aten::blackman_window.periodic" , TORCH_FN(blackman_window_periodic)); |
735 | m.impl("aten::empty.names" , TORCH_FN(empty_names)); |
736 | m.impl("aten::empty.memory_format" , TORCH_FN(empty_memory_format)); |
737 | m.impl("aten::_empty_affine_quantized" , TORCH_FN(_empty_affine_quantized)); |
738 | m.impl("aten::_empty_per_channel_affine_quantized" , TORCH_FN(_empty_per_channel_affine_quantized)); |
739 | m.impl("aten::empty_quantized" , TORCH_FN(empty_quantized)); |
740 | m.impl("aten::empty_strided" , TORCH_FN(empty_strided)); |
741 | m.impl("aten::eye" , TORCH_FN(eye)); |
742 | m.impl("aten::eye.m" , TORCH_FN(eye_m)); |
743 | m.impl("aten::full.names" , TORCH_FN(full_names)); |
744 | m.impl("aten::full" , TORCH_FN(full)); |
745 | m.impl("aten::from_file" , TORCH_FN(from_file)); |
746 | m.impl("aten::hann_window" , TORCH_FN(hann_window)); |
747 | m.impl("aten::hann_window.periodic" , TORCH_FN(hann_window_periodic)); |
748 | m.impl("aten::hamming_window" , TORCH_FN(hamming_window)); |
749 | m.impl("aten::hamming_window.periodic" , TORCH_FN(hamming_window_periodic)); |
750 | m.impl("aten::hamming_window.periodic_alpha" , TORCH_FN(hamming_window_periodic_alpha)); |
751 | m.impl("aten::hamming_window.periodic_alpha_beta" , TORCH_FN(hamming_window_periodic_alpha_beta)); |
752 | m.impl("aten::kaiser_window" , TORCH_FN(kaiser_window)); |
753 | m.impl("aten::kaiser_window.periodic" , TORCH_FN(kaiser_window_periodic)); |
754 | m.impl("aten::kaiser_window.beta" , TORCH_FN(kaiser_window_beta)); |
755 | m.impl("aten::linspace" , TORCH_FN(linspace)); |
756 | m.impl("aten::logspace" , TORCH_FN(logspace)); |
757 | m.impl("aten::ones.names" , TORCH_FN(ones_names)); |
758 | m.impl("aten::ones" , TORCH_FN(ones)); |
759 | m.impl("aten::scalar_tensor" , TORCH_FN(scalar_tensor)); |
760 | m.impl("aten::rand.names" , TORCH_FN(rand_names)); |
761 | m.impl("aten::rand.generator_with_names" , TORCH_FN(rand_generator_with_names)); |
762 | m.impl("aten::rand" , TORCH_FN(rand)); |
763 | m.impl("aten::rand.generator" , TORCH_FN(rand_generator)); |
764 | m.impl("aten::randint" , TORCH_FN(randint)); |
765 | m.impl("aten::randint.generator" , TORCH_FN(randint_generator)); |
766 | m.impl("aten::randint.low" , TORCH_FN(randint_low)); |
767 | m.impl("aten::randint.low_generator" , TORCH_FN(randint_low_generator)); |
768 | m.impl("aten::randn" , TORCH_FN(randn)); |
769 | m.impl("aten::randn.generator" , TORCH_FN(randn_generator)); |
770 | m.impl("aten::randn.names" , TORCH_FN(randn_names)); |
771 | m.impl("aten::randn.generator_with_names" , TORCH_FN(randn_generator_with_names)); |
772 | m.impl("aten::randperm" , TORCH_FN(randperm)); |
773 | m.impl("aten::randperm.generator" , TORCH_FN(randperm_generator)); |
774 | m.impl("aten::range.step" , TORCH_FN(range_step)); |
775 | m.impl("aten::range" , TORCH_FN(range)); |
776 | m.impl("aten::zeros.names" , TORCH_FN(zeros_names)); |
777 | m.impl("aten::_efficientzerotensor" , TORCH_FN(_efficientzerotensor)); |
778 | m.impl("aten::zeros" , TORCH_FN(zeros)); |
779 | m.impl("aten::sparse_compressed_tensor.comp_plain_value_size" , TORCH_FN(sparse_compressed_tensor_comp_plain_value_size)); |
780 | m.impl("aten::sparse_csr_tensor.crow_col_value_size" , TORCH_FN(sparse_csr_tensor_crow_col_value_size)); |
781 | m.impl("aten::sparse_csc_tensor.ccol_row_value_size" , TORCH_FN(sparse_csc_tensor_ccol_row_value_size)); |
782 | m.impl("aten::sparse_bsr_tensor.crow_col_value_size" , TORCH_FN(sparse_bsr_tensor_crow_col_value_size)); |
783 | m.impl("aten::sparse_bsc_tensor.ccol_row_value_size" , TORCH_FN(sparse_bsc_tensor_ccol_row_value_size)); |
784 | m.impl("aten::sparse_compressed_tensor.comp_plain_value" , TORCH_FN(sparse_compressed_tensor_comp_plain_value)); |
785 | m.impl("aten::sparse_csr_tensor.crow_col_value" , TORCH_FN(sparse_csr_tensor_crow_col_value)); |
786 | m.impl("aten::sparse_csc_tensor.ccol_row_value" , TORCH_FN(sparse_csc_tensor_ccol_row_value)); |
787 | m.impl("aten::sparse_bsr_tensor.crow_col_value" , TORCH_FN(sparse_bsr_tensor_crow_col_value)); |
788 | m.impl("aten::sparse_bsc_tensor.ccol_row_value" , TORCH_FN(sparse_bsc_tensor_ccol_row_value)); |
789 | m.impl("aten::_sparse_compressed_tensor_unsafe" , TORCH_FN(_sparse_compressed_tensor_unsafe)); |
790 | m.impl("aten::_sparse_csr_tensor_unsafe" , TORCH_FN(_sparse_csr_tensor_unsafe)); |
791 | m.impl("aten::_sparse_csc_tensor_unsafe" , TORCH_FN(_sparse_csc_tensor_unsafe)); |
792 | m.impl("aten::_sparse_bsr_tensor_unsafe" , TORCH_FN(_sparse_bsr_tensor_unsafe)); |
793 | m.impl("aten::_sparse_bsc_tensor_unsafe" , TORCH_FN(_sparse_bsc_tensor_unsafe)); |
794 | m.impl("aten::sparse_coo_tensor.size" , TORCH_FN(sparse_coo_tensor_size)); |
795 | m.impl("aten::sparse_coo_tensor.indices" , TORCH_FN(sparse_coo_tensor_indices)); |
796 | m.impl("aten::sparse_coo_tensor.indices_size" , TORCH_FN(sparse_coo_tensor_indices_size)); |
797 | m.impl("aten::_sparse_coo_tensor_unsafe" , TORCH_FN(_sparse_coo_tensor_unsafe)); |
798 | m.impl("aten::_sparse_coo_tensor_with_dims" , TORCH_FN(_sparse_coo_tensor_with_dims)); |
799 | m.impl("aten::_sparse_coo_tensor_with_dims_and_tensors" , TORCH_FN(_sparse_coo_tensor_with_dims_and_tensors)); |
800 | m.impl("aten::_to_copy" , TORCH_FN(_to_copy)); |
801 | m.impl("aten::to.dtype_layout" , TORCH_FN(to_dtype_layout)); |
802 | m.impl("aten::tril_indices" , TORCH_FN(tril_indices)); |
803 | m.impl("aten::triu_indices" , TORCH_FN(triu_indices)); |
804 | m.impl("aten::normal.float_float" , TORCH_FN(normal_float_float)); |
805 | m.impl("aten::fft_fftfreq" , TORCH_FN(fft_fftfreq)); |
806 | m.impl("aten::fft_rfftfreq" , TORCH_FN(fft_rfftfreq));; |
807 | m.impl(TORCH_SELECTIVE_NAME("aten::is_pinned" ), TORCH_FN(is_pinned)); |
808 | m.impl(TORCH_SELECTIVE_NAME("aten::_pin_memory" ), TORCH_FN(_pin_memory)); |
809 | } |
810 | |
811 | } // namespace |
812 | } // at |
813 | |