1// We register ops with a higher priority dispatch key (BackendSelect) than the usual backend-specific keys (e.g. CPU)
2// which makes calls to the factory functions dispatch to here.
3// We then 'manually' compute a lower-priority to re-dispatch to (e.g. CPU) to get to the eventually correct backend.
4// @generated by torchgen/gen.py from RegisterBackendSelect.cpp
5
6#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
7#include <ATen/core/Tensor.h>
8#include <ATen/core/dispatch/DispatchKeyExtractor.h>
9#include <torch/library.h>
10
11#ifndef AT_PER_OPERATOR_HEADERS
12#include <ATen/Operators.h>
13#else
14#include <ATen/ops/is_pinned_ops.h>
15#include <ATen/ops/_pin_memory_ops.h>
16
17#include <ATen/ops/_cudnn_init_dropout_state_ops.h>
18#include <ATen/ops/arange_ops.h>
19#include <ATen/ops/arange_ops.h>
20#include <ATen/ops/arange_ops.h>
21#include <ATen/ops/bartlett_window_ops.h>
22#include <ATen/ops/bartlett_window_ops.h>
23#include <ATen/ops/blackman_window_ops.h>
24#include <ATen/ops/blackman_window_ops.h>
25#include <ATen/ops/empty_ops.h>
26#include <ATen/ops/empty_ops.h>
27#include <ATen/ops/_empty_affine_quantized_ops.h>
28#include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
29#include <ATen/ops/empty_quantized_ops.h>
30#include <ATen/ops/empty_strided_ops.h>
31#include <ATen/ops/eye_ops.h>
32#include <ATen/ops/eye_ops.h>
33#include <ATen/ops/full_ops.h>
34#include <ATen/ops/full_ops.h>
35#include <ATen/ops/from_file_ops.h>
36#include <ATen/ops/hann_window_ops.h>
37#include <ATen/ops/hann_window_ops.h>
38#include <ATen/ops/hamming_window_ops.h>
39#include <ATen/ops/hamming_window_ops.h>
40#include <ATen/ops/hamming_window_ops.h>
41#include <ATen/ops/hamming_window_ops.h>
42#include <ATen/ops/kaiser_window_ops.h>
43#include <ATen/ops/kaiser_window_ops.h>
44#include <ATen/ops/kaiser_window_ops.h>
45#include <ATen/ops/linspace_ops.h>
46#include <ATen/ops/logspace_ops.h>
47#include <ATen/ops/ones_ops.h>
48#include <ATen/ops/ones_ops.h>
49#include <ATen/ops/scalar_tensor_ops.h>
50#include <ATen/ops/rand_ops.h>
51#include <ATen/ops/rand_ops.h>
52#include <ATen/ops/rand_ops.h>
53#include <ATen/ops/rand_ops.h>
54#include <ATen/ops/randint_ops.h>
55#include <ATen/ops/randint_ops.h>
56#include <ATen/ops/randint_ops.h>
57#include <ATen/ops/randint_ops.h>
58#include <ATen/ops/randn_ops.h>
59#include <ATen/ops/randn_ops.h>
60#include <ATen/ops/randn_ops.h>
61#include <ATen/ops/randn_ops.h>
62#include <ATen/ops/randperm_ops.h>
63#include <ATen/ops/randperm_ops.h>
64#include <ATen/ops/range_ops.h>
65#include <ATen/ops/range_ops.h>
66#include <ATen/ops/zeros_ops.h>
67#include <ATen/ops/_efficientzerotensor_ops.h>
68#include <ATen/ops/zeros_ops.h>
69#include <ATen/ops/sparse_compressed_tensor_ops.h>
70#include <ATen/ops/sparse_csr_tensor_ops.h>
71#include <ATen/ops/sparse_csc_tensor_ops.h>
72#include <ATen/ops/sparse_bsr_tensor_ops.h>
73#include <ATen/ops/sparse_bsc_tensor_ops.h>
74#include <ATen/ops/sparse_compressed_tensor_ops.h>
75#include <ATen/ops/sparse_csr_tensor_ops.h>
76#include <ATen/ops/sparse_csc_tensor_ops.h>
77#include <ATen/ops/sparse_bsr_tensor_ops.h>
78#include <ATen/ops/sparse_bsc_tensor_ops.h>
79#include <ATen/ops/_sparse_compressed_tensor_unsafe_ops.h>
80#include <ATen/ops/_sparse_csr_tensor_unsafe_ops.h>
81#include <ATen/ops/_sparse_csc_tensor_unsafe_ops.h>
82#include <ATen/ops/_sparse_bsr_tensor_unsafe_ops.h>
83#include <ATen/ops/_sparse_bsc_tensor_unsafe_ops.h>
84#include <ATen/ops/sparse_coo_tensor_ops.h>
85#include <ATen/ops/sparse_coo_tensor_ops.h>
86#include <ATen/ops/sparse_coo_tensor_ops.h>
87#include <ATen/ops/_sparse_coo_tensor_unsafe_ops.h>
88#include <ATen/ops/_sparse_coo_tensor_with_dims_ops.h>
89#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h>
90#include <ATen/ops/_to_copy_ops.h>
91#include <ATen/ops/to_ops.h>
92#include <ATen/ops/tril_indices_ops.h>
93#include <ATen/ops/triu_indices_ops.h>
94#include <ATen/ops/normal_ops.h>
95#include <ATen/ops/fft_fftfreq_ops.h>
96#include <ATen/ops/fft_rfftfreq_ops.h>
97#endif
98
99namespace at {
100
101namespace {
102
103// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
104C10_ALWAYS_INLINE
105at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
106 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
107 return at::_ops::_cudnn_init_dropout_state::redispatch(
108 _dk, dropout, train, dropout_seed, dtype, layout, device, pin_memory);
109}
110// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
111C10_ALWAYS_INLINE
112at::Tensor arange(const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
113 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
114 return at::_ops::arange::redispatch(
115 _dk, end, dtype, layout, device, pin_memory);
116}
117// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
118C10_ALWAYS_INLINE
119at::Tensor arange_start(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
120 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
121 return at::_ops::arange_start::redispatch(
122 _dk, start, end, dtype, layout, device, pin_memory);
123}
124// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
125C10_ALWAYS_INLINE
126at::Tensor arange_start_step(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
127 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
128 return at::_ops::arange_start_step::redispatch(
129 _dk, start, end, step, dtype, layout, device, pin_memory);
130}
131// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
132C10_ALWAYS_INLINE
133at::Tensor bartlett_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
134 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
135 return at::_ops::bartlett_window::redispatch(
136 _dk, window_length, dtype, layout, device, pin_memory);
137}
138// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
139C10_ALWAYS_INLINE
140at::Tensor bartlett_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
141 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
142 return at::_ops::bartlett_window_periodic::redispatch(
143 _dk, window_length, periodic, dtype, layout, device, pin_memory);
144}
145// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
146C10_ALWAYS_INLINE
147at::Tensor blackman_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
148 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
149 return at::_ops::blackman_window::redispatch(
150 _dk, window_length, dtype, layout, device, pin_memory);
151}
152// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
153C10_ALWAYS_INLINE
154at::Tensor blackman_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
155 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
156 return at::_ops::blackman_window_periodic::redispatch(
157 _dk, window_length, periodic, dtype, layout, device, pin_memory);
158}
159// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
160C10_ALWAYS_INLINE
161at::Tensor empty_names(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
162 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
163 return at::_ops::empty_names::redispatch(
164 _dk, size, names, dtype, layout, device, pin_memory, memory_format);
165}
166// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
167C10_ALWAYS_INLINE
168at::Tensor empty_memory_format(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
169 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
170 return at::_ops::empty_memory_format::redispatch(
171 _dk, size, dtype, layout, device, pin_memory, memory_format);
172}
173// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
174C10_ALWAYS_INLINE
175at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
176 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
177 return at::_ops::_empty_affine_quantized::redispatch(
178 _dk, size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
179}
180// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
181C10_ALWAYS_INLINE
182at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
183 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(scales, zero_points);
184DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
185DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
186 return at::_ops::_empty_per_channel_affine_quantized::redispatch(
187 _dk, size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
188}
189// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
190C10_ALWAYS_INLINE
191at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
192 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(qtensor);
193DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
194DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
195 return at::_ops::empty_quantized::redispatch(
196 _dk, size, qtensor, dtype, layout, device, pin_memory, memory_format);
197}
198// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
199C10_ALWAYS_INLINE
200at::Tensor empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
201 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
202 return at::_ops::empty_strided::redispatch(
203 _dk, size, stride, dtype, layout, device, pin_memory);
204}
205// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
206C10_ALWAYS_INLINE
207at::Tensor eye(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
208 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
209 return at::_ops::eye::redispatch(
210 _dk, n, dtype, layout, device, pin_memory);
211}
212// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
213C10_ALWAYS_INLINE
214at::Tensor eye_m(int64_t n, int64_t m, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
215 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
216 return at::_ops::eye_m::redispatch(
217 _dk, n, m, dtype, layout, device, pin_memory);
218}
219// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
220C10_ALWAYS_INLINE
221at::Tensor full_names(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
222 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
223 return at::_ops::full_names::redispatch(
224 _dk, size, fill_value, names, dtype, layout, device, pin_memory);
225}
226// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
227C10_ALWAYS_INLINE
228at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
229 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
230 return at::_ops::full::redispatch(
231 _dk, size, fill_value, dtype, layout, device, pin_memory);
232}
233// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
234C10_ALWAYS_INLINE
235at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
236 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
237 return at::_ops::from_file::redispatch(
238 _dk, filename, shared, size, dtype, layout, device, pin_memory);
239}
240// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
241C10_ALWAYS_INLINE
242at::Tensor hann_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
243 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
244 return at::_ops::hann_window::redispatch(
245 _dk, window_length, dtype, layout, device, pin_memory);
246}
247// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
248C10_ALWAYS_INLINE
249at::Tensor hann_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
250 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
251 return at::_ops::hann_window_periodic::redispatch(
252 _dk, window_length, periodic, dtype, layout, device, pin_memory);
253}
254// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
255C10_ALWAYS_INLINE
256at::Tensor hamming_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
257 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
258 return at::_ops::hamming_window::redispatch(
259 _dk, window_length, dtype, layout, device, pin_memory);
260}
261// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
262C10_ALWAYS_INLINE
263at::Tensor hamming_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
264 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
265 return at::_ops::hamming_window_periodic::redispatch(
266 _dk, window_length, periodic, dtype, layout, device, pin_memory);
267}
268// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
269C10_ALWAYS_INLINE
270at::Tensor hamming_window_periodic_alpha(int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
271 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
272 return at::_ops::hamming_window_periodic_alpha::redispatch(
273 _dk, window_length, periodic, alpha, dtype, layout, device, pin_memory);
274}
275// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
276C10_ALWAYS_INLINE
277at::Tensor hamming_window_periodic_alpha_beta(int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
278 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
279 return at::_ops::hamming_window_periodic_alpha_beta::redispatch(
280 _dk, window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
281}
282// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
283C10_ALWAYS_INLINE
284at::Tensor kaiser_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
285 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
286 return at::_ops::kaiser_window::redispatch(
287 _dk, window_length, dtype, layout, device, pin_memory);
288}
289// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
290C10_ALWAYS_INLINE
291at::Tensor kaiser_window_periodic(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
292 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
293 return at::_ops::kaiser_window_periodic::redispatch(
294 _dk, window_length, periodic, dtype, layout, device, pin_memory);
295}
296// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
297C10_ALWAYS_INLINE
298at::Tensor kaiser_window_beta(int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
299 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
300 return at::_ops::kaiser_window_beta::redispatch(
301 _dk, window_length, periodic, beta, dtype, layout, device, pin_memory);
302}
303// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
304C10_ALWAYS_INLINE
305at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
306 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
307 return at::_ops::linspace::redispatch(
308 _dk, start, end, steps, dtype, layout, device, pin_memory);
309}
310// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
311C10_ALWAYS_INLINE
312at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
313 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
314 return at::_ops::logspace::redispatch(
315 _dk, start, end, steps, base, dtype, layout, device, pin_memory);
316}
317// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
318C10_ALWAYS_INLINE
319at::Tensor ones_names(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
320 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
321 return at::_ops::ones_names::redispatch(
322 _dk, size, names, dtype, layout, device, pin_memory);
323}
324// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
325C10_ALWAYS_INLINE
326at::Tensor ones(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
327 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
328 return at::_ops::ones::redispatch(
329 _dk, size, dtype, layout, device, pin_memory);
330}
331// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
332C10_ALWAYS_INLINE
333at::Tensor scalar_tensor(const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
334 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
335 return at::_ops::scalar_tensor::redispatch(
336 _dk, s, dtype, layout, device, pin_memory);
337}
338// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
339C10_ALWAYS_INLINE
340at::Tensor rand_names(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
341 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
342 return at::_ops::rand_names::redispatch(
343 _dk, size, names, dtype, layout, device, pin_memory);
344}
345// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
346C10_ALWAYS_INLINE
347at::Tensor rand_generator_with_names(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
348 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
349 return at::_ops::rand_generator_with_names::redispatch(
350 _dk, size, generator, names, dtype, layout, device, pin_memory);
351}
352// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
353C10_ALWAYS_INLINE
354at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
355 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
356 return at::_ops::rand::redispatch(
357 _dk, size, dtype, layout, device, pin_memory);
358}
359// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
360C10_ALWAYS_INLINE
361at::Tensor rand_generator(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
362 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
363 return at::_ops::rand_generator::redispatch(
364 _dk, size, generator, dtype, layout, device, pin_memory);
365}
366// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
367C10_ALWAYS_INLINE
368at::Tensor randint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
369 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
370 return at::_ops::randint::redispatch(
371 _dk, high, size, dtype, layout, device, pin_memory);
372}
373// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
374C10_ALWAYS_INLINE
375at::Tensor randint_generator(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
376 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
377 return at::_ops::randint_generator::redispatch(
378 _dk, high, size, generator, dtype, layout, device, pin_memory);
379}
380// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
381C10_ALWAYS_INLINE
382at::Tensor randint_low(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
383 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
384 return at::_ops::randint_low::redispatch(
385 _dk, low, high, size, dtype, layout, device, pin_memory);
386}
387// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
388C10_ALWAYS_INLINE
389at::Tensor randint_low_generator(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
390 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
391 return at::_ops::randint_low_generator::redispatch(
392 _dk, low, high, size, generator, dtype, layout, device, pin_memory);
393}
394// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
395C10_ALWAYS_INLINE
396at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
397 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
398 return at::_ops::randn::redispatch(
399 _dk, size, dtype, layout, device, pin_memory);
400}
401// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
402C10_ALWAYS_INLINE
403at::Tensor randn_generator(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
404 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
405 return at::_ops::randn_generator::redispatch(
406 _dk, size, generator, dtype, layout, device, pin_memory);
407}
408// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
409C10_ALWAYS_INLINE
410at::Tensor randn_names(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
411 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
412 return at::_ops::randn_names::redispatch(
413 _dk, size, names, dtype, layout, device, pin_memory);
414}
415// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
416C10_ALWAYS_INLINE
417at::Tensor randn_generator_with_names(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
418 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
419 return at::_ops::randn_generator_with_names::redispatch(
420 _dk, size, generator, names, dtype, layout, device, pin_memory);
421}
422// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
423C10_ALWAYS_INLINE
424at::Tensor randperm(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
425 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
426 return at::_ops::randperm::redispatch(
427 _dk, n, dtype, layout, device, pin_memory);
428}
429// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
430C10_ALWAYS_INLINE
431at::Tensor randperm_generator(int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
432 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
433 return at::_ops::randperm_generator::redispatch(
434 _dk, n, generator, dtype, layout, device, pin_memory);
435}
436// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
437C10_ALWAYS_INLINE
438at::Tensor range_step(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
439 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
440 return at::_ops::range_step::redispatch(
441 _dk, start, end, step, dtype, layout, device, pin_memory);
442}
443// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
444C10_ALWAYS_INLINE
445at::Tensor range(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
446 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
447 return at::_ops::range::redispatch(
448 _dk, start, end, dtype, layout, device, pin_memory);
449}
450// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
451C10_ALWAYS_INLINE
452at::Tensor zeros_names(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
453 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
454 return at::_ops::zeros_names::redispatch(
455 _dk, size, names, dtype, layout, device, pin_memory);
456}
457// aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
458C10_ALWAYS_INLINE
459at::Tensor _efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
460 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
461 return at::_ops::_efficientzerotensor::redispatch(
462 _dk, size, dtype, layout, device, pin_memory);
463}
464// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
465C10_ALWAYS_INLINE
466at::Tensor zeros(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
467 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
468 return at::_ops::zeros::redispatch(
469 _dk, size, dtype, layout, device, pin_memory);
470}
471// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
472C10_ALWAYS_INLINE
473at::Tensor sparse_compressed_tensor_comp_plain_value_size(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
474 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(compressed_indices, plain_indices, values);
475DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
476DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
477 return at::_ops::sparse_compressed_tensor_comp_plain_value_size::redispatch(
478 _dk, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
479}
480// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
481C10_ALWAYS_INLINE
482at::Tensor sparse_csr_tensor_crow_col_value_size(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
483 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values);
484DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
485DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
486 return at::_ops::sparse_csr_tensor_crow_col_value_size::redispatch(
487 _dk, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
488}
489// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
490C10_ALWAYS_INLINE
491at::Tensor sparse_csc_tensor_ccol_row_value_size(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
492 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values);
493DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
494DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
495 return at::_ops::sparse_csc_tensor_ccol_row_value_size::redispatch(
496 _dk, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
497}
498// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
499C10_ALWAYS_INLINE
500at::Tensor sparse_bsr_tensor_crow_col_value_size(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
501 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values);
502DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
503DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
504 return at::_ops::sparse_bsr_tensor_crow_col_value_size::redispatch(
505 _dk, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
506}
507// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
508C10_ALWAYS_INLINE
509at::Tensor sparse_bsc_tensor_ccol_row_value_size(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
510 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values);
511DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
512DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
513 return at::_ops::sparse_bsc_tensor_ccol_row_value_size::redispatch(
514 _dk, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
515}
516// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
517C10_ALWAYS_INLINE
518at::Tensor sparse_compressed_tensor_comp_plain_value(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
519 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(compressed_indices, plain_indices, values);
520DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
521DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
522 return at::_ops::sparse_compressed_tensor_comp_plain_value::redispatch(
523 _dk, compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
524}
525// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
526C10_ALWAYS_INLINE
527at::Tensor sparse_csr_tensor_crow_col_value(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
528 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values);
529DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
530DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
531 return at::_ops::sparse_csr_tensor_crow_col_value::redispatch(
532 _dk, crow_indices, col_indices, values, dtype, layout, device, pin_memory);
533}
534// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
535C10_ALWAYS_INLINE
536at::Tensor sparse_csc_tensor_ccol_row_value(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
537 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values);
538DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
539DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
540 return at::_ops::sparse_csc_tensor_ccol_row_value::redispatch(
541 _dk, ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
542}
543// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
544C10_ALWAYS_INLINE
545at::Tensor sparse_bsr_tensor_crow_col_value(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
546 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values);
547DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
548DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
549 return at::_ops::sparse_bsr_tensor_crow_col_value::redispatch(
550 _dk, crow_indices, col_indices, values, dtype, layout, device, pin_memory);
551}
552// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
553C10_ALWAYS_INLINE
554at::Tensor sparse_bsc_tensor_ccol_row_value(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
555 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values);
556DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
557DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
558 return at::_ops::sparse_bsc_tensor_ccol_row_value::redispatch(
559 _dk, ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
560}
561// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
562C10_ALWAYS_INLINE
563at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
564 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(compressed_indices, plain_indices, values);
565DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
566DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
567 return at::_ops::_sparse_compressed_tensor_unsafe::redispatch(
568 _dk, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
569}
570// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
571C10_ALWAYS_INLINE
572at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
573 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values);
574DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
575DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
576 return at::_ops::_sparse_csr_tensor_unsafe::redispatch(
577 _dk, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
578}
579// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
580C10_ALWAYS_INLINE
581at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
582 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values);
583DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
584DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
585 return at::_ops::_sparse_csc_tensor_unsafe::redispatch(
586 _dk, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
587}
588// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
589C10_ALWAYS_INLINE
590at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
591 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(crow_indices, col_indices, values);
592DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
593DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
594 return at::_ops::_sparse_bsr_tensor_unsafe::redispatch(
595 _dk, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
596}
597// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
598C10_ALWAYS_INLINE
599at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
600 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(ccol_indices, row_indices, values);
601DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
602DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
603 return at::_ops::_sparse_bsc_tensor_unsafe::redispatch(
604 _dk, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
605}
606// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
607C10_ALWAYS_INLINE
608at::Tensor sparse_coo_tensor_size(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
609 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
610 return at::_ops::sparse_coo_tensor_size::redispatch(
611 _dk, size, dtype, layout, device, pin_memory);
612}
613// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
614C10_ALWAYS_INLINE
615at::Tensor sparse_coo_tensor_indices(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
616 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(indices, values);
617DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
618DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
619 return at::_ops::sparse_coo_tensor_indices::redispatch(
620 _dk, indices, values, dtype, layout, device, pin_memory);
621}
622// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
623C10_ALWAYS_INLINE
624at::Tensor sparse_coo_tensor_indices_size(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
625 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(indices, values);
626DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
627DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
628 return at::_ops::sparse_coo_tensor_indices_size::redispatch(
629 _dk, indices, values, size, dtype, layout, device, pin_memory);
630}
631// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
632C10_ALWAYS_INLINE
633at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
634 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(indices, values);
635DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
636DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
637 return at::_ops::_sparse_coo_tensor_unsafe::redispatch(
638 _dk, indices, values, size, dtype, layout, device, pin_memory);
639}
640// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
641C10_ALWAYS_INLINE
642at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
643 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
644 return at::_ops::_sparse_coo_tensor_with_dims::redispatch(
645 _dk, sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
646}
647// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
648C10_ALWAYS_INLINE
649at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
650 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(indices, values);
651DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
652DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
653 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::redispatch(
654 _dk, sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
655}
656// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
657C10_ALWAYS_INLINE
658at::Tensor _to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
659 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(self);
660DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
661DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
662 return at::_ops::_to_copy::redispatch(
663 _dk, self, dtype, layout, device, pin_memory, non_blocking, memory_format);
664}
665// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
666C10_ALWAYS_INLINE
667at::Tensor to_dtype_layout(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
668 DispatchKeySet _dk_set = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device)) | c10::detail::multi_dispatch_key_set(self);
669DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
670DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);
671 return at::_ops::to_dtype_layout::redispatch(
672 _dk, self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
673}
674// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
675C10_ALWAYS_INLINE
676at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
677 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
678 return at::_ops::tril_indices::redispatch(
679 _dk, row, col, offset, dtype, layout, device, pin_memory);
680}
681// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
682C10_ALWAYS_INLINE
683at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
684 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
685 return at::_ops::triu_indices::redispatch(
686 _dk, row, col, offset, dtype, layout, device, pin_memory);
687}
688// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
689C10_ALWAYS_INLINE
690at::Tensor normal_float_float(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
691 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
692 return at::_ops::normal_float_float::redispatch(
693 _dk, mean, std, size, generator, dtype, layout, device, pin_memory);
694}
695// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
696C10_ALWAYS_INLINE
697at::Tensor fft_fftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
698 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
699 return at::_ops::fft_fftfreq::redispatch(
700 _dk, n, d, dtype, layout, device, pin_memory);
701}
702// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
703C10_ALWAYS_INLINE
704at::Tensor fft_rfftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
705 DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(dtype, layout, device));
706 return at::_ops::fft_rfftfreq::redispatch(
707 _dk, n, d, dtype, layout, device, pin_memory);
708}
709
// Returns whether `self` is allocated in pinned (page-locked) memory.
// `device` optionally names the accelerator the pinning would target.
bool is_pinned(const Tensor& self, c10::optional<at::Device> device) {
  // Only CPU tensors can be pinned
  if (!self.is_cpu()) {
    return false;
  }
  // TODO: fetch scalar type from Tensor? But it doesn't really matter...
  // NOTE(review): the dispatch key is computed from the *pin target* device
  // (defaulting to CUDA when none is given), not from `self`, so the query
  // is answered by the accelerator backend's kernel rather than CPU's.
  DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA)));
  return at::_ops::is_pinned::redispatch(_dk, self, device);
}
719
// Returns a copy of `self` placed in pinned (page-locked) memory.
// `device` optionally names the accelerator the pinning targets.
// Throws (TORCH_CHECK) if `self` is not a CPU tensor.
at::Tensor _pin_memory(const Tensor& self, c10::optional<at::Device> device) {
  TORCH_CHECK(self.device().is_cpu(), "cannot pin '", self.toString(), "' only dense CPU tensors can be pinned");
  // Dispatch on the pin-target device (defaulting to CUDA when none is
  // given) rather than on `self` — presumably the pinned allocator is
  // provided by the accelerator backend, not by CPU.
  DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA)));
  return at::_ops::_pin_memory::redispatch(_dk, self, device);
}
725
726TORCH_LIBRARY_IMPL(aten, BackendSelect, m) {
727 m.impl("aten::_cudnn_init_dropout_state", TORCH_FN(_cudnn_init_dropout_state));
728 m.impl("aten::arange", TORCH_FN(arange));
729 m.impl("aten::arange.start", TORCH_FN(arange_start));
730 m.impl("aten::arange.start_step", TORCH_FN(arange_start_step));
731 m.impl("aten::bartlett_window", TORCH_FN(bartlett_window));
732 m.impl("aten::bartlett_window.periodic", TORCH_FN(bartlett_window_periodic));
733 m.impl("aten::blackman_window", TORCH_FN(blackman_window));
734 m.impl("aten::blackman_window.periodic", TORCH_FN(blackman_window_periodic));
735 m.impl("aten::empty.names", TORCH_FN(empty_names));
736 m.impl("aten::empty.memory_format", TORCH_FN(empty_memory_format));
737 m.impl("aten::_empty_affine_quantized", TORCH_FN(_empty_affine_quantized));
738 m.impl("aten::_empty_per_channel_affine_quantized", TORCH_FN(_empty_per_channel_affine_quantized));
739 m.impl("aten::empty_quantized", TORCH_FN(empty_quantized));
740 m.impl("aten::empty_strided", TORCH_FN(empty_strided));
741 m.impl("aten::eye", TORCH_FN(eye));
742 m.impl("aten::eye.m", TORCH_FN(eye_m));
743 m.impl("aten::full.names", TORCH_FN(full_names));
744 m.impl("aten::full", TORCH_FN(full));
745 m.impl("aten::from_file", TORCH_FN(from_file));
746 m.impl("aten::hann_window", TORCH_FN(hann_window));
747 m.impl("aten::hann_window.periodic", TORCH_FN(hann_window_periodic));
748 m.impl("aten::hamming_window", TORCH_FN(hamming_window));
749 m.impl("aten::hamming_window.periodic", TORCH_FN(hamming_window_periodic));
750 m.impl("aten::hamming_window.periodic_alpha", TORCH_FN(hamming_window_periodic_alpha));
751 m.impl("aten::hamming_window.periodic_alpha_beta", TORCH_FN(hamming_window_periodic_alpha_beta));
752 m.impl("aten::kaiser_window", TORCH_FN(kaiser_window));
753 m.impl("aten::kaiser_window.periodic", TORCH_FN(kaiser_window_periodic));
754 m.impl("aten::kaiser_window.beta", TORCH_FN(kaiser_window_beta));
755 m.impl("aten::linspace", TORCH_FN(linspace));
756 m.impl("aten::logspace", TORCH_FN(logspace));
757 m.impl("aten::ones.names", TORCH_FN(ones_names));
758 m.impl("aten::ones", TORCH_FN(ones));
759 m.impl("aten::scalar_tensor", TORCH_FN(scalar_tensor));
760 m.impl("aten::rand.names", TORCH_FN(rand_names));
761 m.impl("aten::rand.generator_with_names", TORCH_FN(rand_generator_with_names));
762 m.impl("aten::rand", TORCH_FN(rand));
763 m.impl("aten::rand.generator", TORCH_FN(rand_generator));
764 m.impl("aten::randint", TORCH_FN(randint));
765 m.impl("aten::randint.generator", TORCH_FN(randint_generator));
766 m.impl("aten::randint.low", TORCH_FN(randint_low));
767 m.impl("aten::randint.low_generator", TORCH_FN(randint_low_generator));
768 m.impl("aten::randn", TORCH_FN(randn));
769 m.impl("aten::randn.generator", TORCH_FN(randn_generator));
770 m.impl("aten::randn.names", TORCH_FN(randn_names));
771 m.impl("aten::randn.generator_with_names", TORCH_FN(randn_generator_with_names));
772 m.impl("aten::randperm", TORCH_FN(randperm));
773 m.impl("aten::randperm.generator", TORCH_FN(randperm_generator));
774 m.impl("aten::range.step", TORCH_FN(range_step));
775 m.impl("aten::range", TORCH_FN(range));
776 m.impl("aten::zeros.names", TORCH_FN(zeros_names));
777 m.impl("aten::_efficientzerotensor", TORCH_FN(_efficientzerotensor));
778 m.impl("aten::zeros", TORCH_FN(zeros));
779 m.impl("aten::sparse_compressed_tensor.comp_plain_value_size", TORCH_FN(sparse_compressed_tensor_comp_plain_value_size));
780 m.impl("aten::sparse_csr_tensor.crow_col_value_size", TORCH_FN(sparse_csr_tensor_crow_col_value_size));
781 m.impl("aten::sparse_csc_tensor.ccol_row_value_size", TORCH_FN(sparse_csc_tensor_ccol_row_value_size));
782 m.impl("aten::sparse_bsr_tensor.crow_col_value_size", TORCH_FN(sparse_bsr_tensor_crow_col_value_size));
783 m.impl("aten::sparse_bsc_tensor.ccol_row_value_size", TORCH_FN(sparse_bsc_tensor_ccol_row_value_size));
784 m.impl("aten::sparse_compressed_tensor.comp_plain_value", TORCH_FN(sparse_compressed_tensor_comp_plain_value));
785 m.impl("aten::sparse_csr_tensor.crow_col_value", TORCH_FN(sparse_csr_tensor_crow_col_value));
786 m.impl("aten::sparse_csc_tensor.ccol_row_value", TORCH_FN(sparse_csc_tensor_ccol_row_value));
787 m.impl("aten::sparse_bsr_tensor.crow_col_value", TORCH_FN(sparse_bsr_tensor_crow_col_value));
788 m.impl("aten::sparse_bsc_tensor.ccol_row_value", TORCH_FN(sparse_bsc_tensor_ccol_row_value));
789 m.impl("aten::_sparse_compressed_tensor_unsafe", TORCH_FN(_sparse_compressed_tensor_unsafe));
790 m.impl("aten::_sparse_csr_tensor_unsafe", TORCH_FN(_sparse_csr_tensor_unsafe));
791 m.impl("aten::_sparse_csc_tensor_unsafe", TORCH_FN(_sparse_csc_tensor_unsafe));
792 m.impl("aten::_sparse_bsr_tensor_unsafe", TORCH_FN(_sparse_bsr_tensor_unsafe));
793 m.impl("aten::_sparse_bsc_tensor_unsafe", TORCH_FN(_sparse_bsc_tensor_unsafe));
794 m.impl("aten::sparse_coo_tensor.size", TORCH_FN(sparse_coo_tensor_size));
795 m.impl("aten::sparse_coo_tensor.indices", TORCH_FN(sparse_coo_tensor_indices));
796 m.impl("aten::sparse_coo_tensor.indices_size", TORCH_FN(sparse_coo_tensor_indices_size));
797 m.impl("aten::_sparse_coo_tensor_unsafe", TORCH_FN(_sparse_coo_tensor_unsafe));
798 m.impl("aten::_sparse_coo_tensor_with_dims", TORCH_FN(_sparse_coo_tensor_with_dims));
799 m.impl("aten::_sparse_coo_tensor_with_dims_and_tensors", TORCH_FN(_sparse_coo_tensor_with_dims_and_tensors));
800 m.impl("aten::_to_copy", TORCH_FN(_to_copy));
801 m.impl("aten::to.dtype_layout", TORCH_FN(to_dtype_layout));
802 m.impl("aten::tril_indices", TORCH_FN(tril_indices));
803 m.impl("aten::triu_indices", TORCH_FN(triu_indices));
804 m.impl("aten::normal.float_float", TORCH_FN(normal_float_float));
805 m.impl("aten::fft_fftfreq", TORCH_FN(fft_fftfreq));
806 m.impl("aten::fft_rfftfreq", TORCH_FN(fft_rfftfreq));;
807 m.impl(TORCH_SELECTIVE_NAME("aten::is_pinned"), TORCH_FN(is_pinned));
808 m.impl(TORCH_SELECTIVE_NAME("aten::_pin_memory"), TORCH_FN(_pin_memory));
809}
810
811} // namespace
812} // at
813