1// required for old g++ to compile PRId64 macros, see
2// https://github.com/pytorch/pytorch/issues/3571
3// for context
4#ifndef __STDC_FORMAT_MACROS
5#define __STDC_FORMAT_MACROS
6#endif
7
8// an external backend might generate file within its code tree
9// and check all the source files within the tree with clang-format.
10// so, disable it since the backend might have a different config.
11// clang-format off
12
13// NOTE: This condition is true for all PyTorch internal libraries, it
14// just excludes external projects such as torch_xla which
15// re-use some of the PyTorch codegen machinery.
16#if defined(CAFFE2_BUILD_MAIN_LIB) || \
17 defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
18 defined(TORCH_HIP_BUILD_MAIN_LIB) || \
19 defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
20 defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
21#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
22#endif
23
24// @generated by torchgen/gen.py from RegisterDispatchKey.cpp
25
26#include <c10/core/TensorImpl.h>
27#include <c10/core/Allocator.h>
28#include <ATen/DeviceGuard.h>
29#include <ATen/NamedTensorUtils.h>
30#include <ATen/Utils.h>
31#include <ATen/WrapDimUtils.h>
32#include <ATen/Dispatch.h>
33#include <c10/util/ExclusivelyOwned.h>
34#include <c10/util/Half.h>
35#include <c10/core/UndefinedTensorImpl.h>
36#include <c10/util/Optional.h>
37#include <ATen/Tensor.h>
38#include <ATen/native/Resize.h>
39
40#include <cstddef>
41#include <functional>
42#include <memory>
43#include <utility>
44
45#include <ATen/Config.h>
46#include <ATen/core/op_registration/adaption.h>
47#include <torch/library.h>
48#include <c10/cuda/CUDAGuard.h>
49#include <ATen/cuda/ATenCUDAGeneral.h>
50#include <ATen/cuda/CUDADevice.h>
51#include <ATen/cuda/CUDAContext.h>
52
53#include <ATen/ops/as_strided_native.h>
54#include <ATen/ops/empty.h>
55#include <ATen/ops/empty_strided.h>
56#include <ATen/ops/_copy_from_and_resize.h>
57#include <ATen/ops/_copy_from.h>
58#include <ATen/ops/_coalesce_native.h>
59#include <ATen/ops/_coalesced_native.h>
60#include <ATen/ops/_dimI_native.h>
61#include <ATen/ops/_dimV_native.h>
62#include <ATen/ops/_indices_native.h>
63#include <ATen/ops/_nnz_native.h>
64#include <ATen/ops/_sparse_broadcast_to_native.h>
65#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
66#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
67#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
68#include <ATen/ops/_sparse_log_softmax_native.h>
69#include <ATen/ops/_sparse_softmax_backward_data_native.h>
70#include <ATen/ops/_sparse_softmax_native.h>
71#include <ATen/ops/_sparse_sparse_matmul_native.h>
72#include <ATen/ops/_sparse_sum_backward_native.h>
73#include <ATen/ops/_to_dense_native.h>
74#include <ATen/ops/_values_native.h>
75#include <ATen/ops/abs_native.h>
76#include <ATen/ops/add_native.h>
77#include <ATen/ops/addmm_native.h>
78#include <ATen/ops/any_native.h>
79#include <ATen/ops/asin_native.h>
80#include <ATen/ops/asinh_native.h>
81#include <ATen/ops/atan_native.h>
82#include <ATen/ops/atanh_native.h>
83#include <ATen/ops/bmm_native.h>
84#include <ATen/ops/cat_native.h>
85#include <ATen/ops/ceil_native.h>
86#include <ATen/ops/clone_native.h>
87#include <ATen/ops/conj_physical_native.h>
88#include <ATen/ops/copy_native.h>
89#include <ATen/ops/copy_sparse_to_sparse_native.h>
90#include <ATen/ops/deg2rad_native.h>
91#include <ATen/ops/dense_dim_native.h>
92#include <ATen/ops/div_native.h>
93#include <ATen/ops/empty_like_native.h>
94#include <ATen/ops/empty_native.h>
95#include <ATen/ops/erf_native.h>
96#include <ATen/ops/erfinv_native.h>
97#include <ATen/ops/expm1_native.h>
98#include <ATen/ops/floor_divide_native.h>
99#include <ATen/ops/floor_native.h>
100#include <ATen/ops/frac_native.h>
101#include <ATen/ops/hspmm_native.h>
102#include <ATen/ops/index_select_native.h>
103#include <ATen/ops/indices_native.h>
104#include <ATen/ops/is_coalesced_native.h>
105#include <ATen/ops/isinf_native.h>
106#include <ATen/ops/isnan_native.h>
107#include <ATen/ops/isneginf_native.h>
108#include <ATen/ops/isposinf_native.h>
109#include <ATen/ops/log1p_native.h>
110#include <ATen/ops/mm_native.h>
111#include <ATen/ops/mul_native.h>
112#include <ATen/ops/mv_native.h>
113#include <ATen/ops/nan_to_num_native.h>
114#include <ATen/ops/narrow_copy_native.h>
115#include <ATen/ops/native_norm_native.h>
116#include <ATen/ops/neg_native.h>
117#include <ATen/ops/norm_native.h>
118#include <ATen/ops/permute_native.h>
119#include <ATen/ops/pow_native.h>
120#include <ATen/ops/rad2deg_native.h>
121#include <ATen/ops/relu_native.h>
122#include <ATen/ops/resize_as_sparse_native.h>
123#include <ATen/ops/round_native.h>
124#include <ATen/ops/sgn_native.h>
125#include <ATen/ops/sign_native.h>
126#include <ATen/ops/signbit_native.h>
127#include <ATen/ops/sin_native.h>
128#include <ATen/ops/sinh_native.h>
129#include <ATen/ops/sparse_dim_native.h>
130#include <ATen/ops/sparse_mask_native.h>
131#include <ATen/ops/sparse_resize_and_clear_native.h>
132#include <ATen/ops/sparse_resize_native.h>
133#include <ATen/ops/sqrt_native.h>
134#include <ATen/ops/sspaddmm_native.h>
135#include <ATen/ops/sub_native.h>
136#include <ATen/ops/sum_native.h>
137#include <ATen/ops/tan_native.h>
138#include <ATen/ops/tanh_native.h>
139#include <ATen/ops/threshold_backward_native.h>
140#include <ATen/ops/to_sparse_bsc_native.h>
141#include <ATen/ops/to_sparse_bsr_native.h>
142#include <ATen/ops/to_sparse_csc_native.h>
143#include <ATen/ops/to_sparse_csr_native.h>
144#include <ATen/ops/to_sparse_native.h>
145#include <ATen/ops/trunc_native.h>
146#include <ATen/ops/unsqueeze_native.h>
147#include <ATen/ops/values_native.h>
148#include <ATen/ops/zero_native.h>
149#include <ATen/ops/zeros_native.h>
150
151// See template file RegisterDispatchDefinitions.ini
152namespace at {
153// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
154// ambiguity with conflicting identifiers that may have been defined in
155// at namespace already.
156namespace {
157void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
158 TORCH_CHECK(options.dtype() == out.dtype(),
159 "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
160 TORCH_CHECK(options.device() == out.device(),
161 "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
162 const bool resized = at::native::resize_output(out, sizes);
163 // Only restride if a resize occurred; otherwise we ignore the (advisory)
164 // strides from the meta function and directly use the output tensor's
165 // preexisting strides
166 if (resized) {
167 if (!strides.empty()) {
168 TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
169 // TODO: avoid the redispatch here
170 out.as_strided_(sizes, strides);
171 } else if (options.memory_format_opt().has_value()) {
172 out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
173 }
174 }
175}
176void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
177 // These checks are needed on those operators that:
178 // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
179 // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
180 // For other operators (e.g. 'add'), 'TensorIterator' already checks
181 // these things separately.
182 TORCH_CHECK(options.dtype() == self.dtype(),
183 "Bad in-place call: ",
184 "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
185 TORCH_CHECK(options.device() == self.device(),
186 "Bad in-place call: ",
187 "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
188 TORCH_CHECK(sizes == self.sizes(),
189 "Bad in-place call: ",
190 "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
191}
192namespace {
193at::Tensor wrapper_SparseCUDA__abs(const at::Tensor & self) {
194 // No device check
195 const OptionalDeviceGuard device_guard(device_of(self));
196 return at::native::abs_sparse(self);
197}
198} // anonymous namespace
199namespace {
200at::Tensor & wrapper_SparseCUDA_out_abs_out(const at::Tensor & self, at::Tensor & out) {
201 // No device check
202 const OptionalDeviceGuard device_guard(device_of(self));
203 return at::native::abs_sparse_out(self, out);
204}
205} // anonymous namespace
206namespace {
207at::Tensor & wrapper_SparseCUDA__abs_(at::Tensor & self) {
208 // No device check
209 const OptionalDeviceGuard device_guard(device_of(self));
210 return at::native::abs_sparse_(self);
211}
212} // anonymous namespace
213namespace {
214at::Tensor wrapper_SparseCUDA__sgn(const at::Tensor & self) {
215 c10::optional<Device> common_device = nullopt;
216(void)common_device; // Suppress unused variable warning
217 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sgn", "self");
218 const OptionalDeviceGuard device_guard(device_of(self));
219 return at::native::sgn_sparse(self);
220}
221} // anonymous namespace
222namespace {
223at::Tensor & wrapper_SparseCUDA_out_sgn_out(const at::Tensor & self, at::Tensor & out) {
224 c10::optional<Device> common_device = nullopt;
225(void)common_device; // Suppress unused variable warning
226 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_sgn_out", "out");
227 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_sgn_out", "self");
228 const OptionalDeviceGuard device_guard(device_of(self));
229 return at::native::sgn_sparse_out(self, out);
230}
231} // anonymous namespace
232namespace {
233at::Tensor & wrapper_SparseCUDA__sgn_(at::Tensor & self) {
234 c10::optional<Device> common_device = nullopt;
235(void)common_device; // Suppress unused variable warning
236 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sgn_", "self");
237 const OptionalDeviceGuard device_guard(device_of(self));
238 return at::native::sgn_sparse_(self);
239}
240} // anonymous namespace
241namespace {
242at::Tensor & wrapper_SparseCUDA_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) {
243 c10::optional<Device> common_device = nullopt;
244(void)common_device; // Suppress unused variable warning
245 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_conj_physical_out", "out");
246 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_conj_physical_out", "self");
247 const OptionalDeviceGuard device_guard(device_of(self));
248 return at::native::conj_physical_out_sparse(self, out);
249}
250} // anonymous namespace
251namespace {
252at::Tensor wrapper_SparseCUDA_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
253 // No device check
254 const OptionalDeviceGuard device_guard(device_of(self));
255 return at::native::add_sparse(self, other, alpha);
256}
257} // anonymous namespace
258namespace {
259at::Tensor & wrapper_SparseCUDA_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
260 // No device check
261 const OptionalDeviceGuard device_guard(device_of(self));
262 return at::native::add_out_sparse_cuda(self, other, alpha, out);
263}
264} // anonymous namespace
265namespace {
266at::Tensor & wrapper_SparseCUDA_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
267 // No device check
268 const OptionalDeviceGuard device_guard(device_of(self));
269 return at::native::add_sparse_(self, other, alpha);
270}
271} // anonymous namespace
272namespace {
273at::Tensor wrapper_SparseCUDA__asinh(const at::Tensor & self) {
274 c10::optional<Device> common_device = nullopt;
275(void)common_device; // Suppress unused variable warning
276 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__asinh", "self");
277 const OptionalDeviceGuard device_guard(device_of(self));
278 return at::native::asinh_sparse(self);
279}
280} // anonymous namespace
281namespace {
282at::Tensor & wrapper_SparseCUDA_out_asinh_out(const at::Tensor & self, at::Tensor & out) {
283 c10::optional<Device> common_device = nullopt;
284(void)common_device; // Suppress unused variable warning
285 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_asinh_out", "out");
286 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_asinh_out", "self");
287 const OptionalDeviceGuard device_guard(device_of(self));
288 return at::native::asinh_sparse_out(self, out);
289}
290} // anonymous namespace
291namespace {
292at::Tensor & wrapper_SparseCUDA__asinh_(at::Tensor & self) {
293 c10::optional<Device> common_device = nullopt;
294(void)common_device; // Suppress unused variable warning
295 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__asinh_", "self");
296 const OptionalDeviceGuard device_guard(device_of(self));
297 return at::native::asinh_sparse_(self);
298}
299} // anonymous namespace
300namespace {
301at::Tensor wrapper_SparseCUDA__atanh(const at::Tensor & self) {
302 c10::optional<Device> common_device = nullopt;
303(void)common_device; // Suppress unused variable warning
304 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__atanh", "self");
305 const OptionalDeviceGuard device_guard(device_of(self));
306 return at::native::atanh_sparse(self);
307}
308} // anonymous namespace
309namespace {
310at::Tensor & wrapper_SparseCUDA_out_atanh_out(const at::Tensor & self, at::Tensor & out) {
311 c10::optional<Device> common_device = nullopt;
312(void)common_device; // Suppress unused variable warning
313 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_atanh_out", "out");
314 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_atanh_out", "self");
315 const OptionalDeviceGuard device_guard(device_of(self));
316 return at::native::atanh_sparse_out(self, out);
317}
318} // anonymous namespace
319namespace {
320at::Tensor & wrapper_SparseCUDA__atanh_(at::Tensor & self) {
321 c10::optional<Device> common_device = nullopt;
322(void)common_device; // Suppress unused variable warning
323 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__atanh_", "self");
324 const OptionalDeviceGuard device_guard(device_of(self));
325 return at::native::atanh_sparse_(self);
326}
327} // anonymous namespace
328namespace {
329at::Tensor wrapper_SparseCUDA__asin(const at::Tensor & self) {
330 // No device check
331 const OptionalDeviceGuard device_guard(device_of(self));
332 return at::native::asin_sparse(self);
333}
334} // anonymous namespace
335namespace {
336at::Tensor & wrapper_SparseCUDA_out_asin_out(const at::Tensor & self, at::Tensor & out) {
337 // No device check
338 const OptionalDeviceGuard device_guard(device_of(self));
339 return at::native::asin_sparse_out(self, out);
340}
341} // anonymous namespace
342namespace {
343at::Tensor & wrapper_SparseCUDA__asin_(at::Tensor & self) {
344 // No device check
345 const OptionalDeviceGuard device_guard(device_of(self));
346 return at::native::asin_sparse_(self);
347}
348} // anonymous namespace
349namespace {
350at::Tensor wrapper_SparseCUDA__atan(const at::Tensor & self) {
351 // No device check
352 const OptionalDeviceGuard device_guard(device_of(self));
353 return at::native::atan_sparse(self);
354}
355} // anonymous namespace
356namespace {
357at::Tensor & wrapper_SparseCUDA_out_atan_out(const at::Tensor & self, at::Tensor & out) {
358 // No device check
359 const OptionalDeviceGuard device_guard(device_of(self));
360 return at::native::atan_sparse_out(self, out);
361}
362} // anonymous namespace
363namespace {
364at::Tensor & wrapper_SparseCUDA__atan_(at::Tensor & self) {
365 // No device check
366 const OptionalDeviceGuard device_guard(device_of(self));
367 return at::native::atan_sparse_(self);
368}
369} // anonymous namespace
370namespace {
371at::Tensor wrapper_SparseCUDA__bmm(const at::Tensor & self, const at::Tensor & mat2) {
372 c10::optional<Device> common_device = nullopt;
373(void)common_device; // Suppress unused variable warning
374 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__bmm", "self");
375 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__bmm", "mat2");
376 const OptionalDeviceGuard device_guard(device_of(self));
377 return at::native::bmm_sparse_cuda(self, mat2);
378}
379} // anonymous namespace
380namespace {
381at::Tensor & wrapper_SparseCUDA_out_bmm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
382 c10::optional<Device> common_device = nullopt;
383(void)common_device; // Suppress unused variable warning
384 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_bmm_out", "out");
385 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_bmm_out", "self");
386 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_bmm_out", "mat2");
387 const OptionalDeviceGuard device_guard(device_of(self));
388 return at::native::bmm_out_sparse_cuda(self, mat2, out);
389}
390} // anonymous namespace
391namespace {
392at::Tensor wrapper_SparseCUDA___sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
393 c10::optional<Device> common_device = nullopt;
394(void)common_device; // Suppress unused variable warning
395 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_broadcast_to", "self");
396 const OptionalDeviceGuard device_guard(device_of(self));
397 return at::native::sparse_broadcast_to(self, size);
398}
399} // anonymous namespace
400namespace {
401at::Tensor wrapper_SparseCUDA__cat(const at::ITensorListRef & tensors, int64_t dim) {
402 c10::optional<Device> common_device = nullopt;
403(void)common_device; // Suppress unused variable warning
404 c10::impl::check_and_update_common_device(common_device, tensors, "wrapper_SparseCUDA__cat", "tensors");
405 const OptionalDeviceGuard device_guard(device_of(tensors));
406 return at::native::cat_sparse(tensors, dim);
407}
408} // anonymous namespace
409namespace {
410at::Tensor wrapper_SparseCUDA__ceil(const at::Tensor & self) {
411 // No device check
412 const OptionalDeviceGuard device_guard(device_of(self));
413 return at::native::ceil_sparse(self);
414}
415} // anonymous namespace
416namespace {
417at::Tensor & wrapper_SparseCUDA_out_ceil_out(const at::Tensor & self, at::Tensor & out) {
418 // No device check
419 const OptionalDeviceGuard device_guard(device_of(self));
420 return at::native::ceil_sparse_out(self, out);
421}
422} // anonymous namespace
423namespace {
424at::Tensor & wrapper_SparseCUDA__ceil_(at::Tensor & self) {
425 // No device check
426 const OptionalDeviceGuard device_guard(device_of(self));
427 return at::native::ceil_sparse_(self);
428}
429} // anonymous namespace
430namespace {
431at::Tensor & wrapper_SparseCUDA__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
432 // No device check
433 // DeviceGuard omitted
434 return at::native::copy_sparse_wrapper_(self, src, non_blocking);
435}
436} // anonymous namespace
437namespace {
438at::Tensor wrapper_SparseCUDA_Tensor_div(const at::Tensor & self, const at::Tensor & other) {
439 // No device check
440 const OptionalDeviceGuard device_guard(device_of(self));
441 return at::native::div_sparse(self, other);
442}
443} // anonymous namespace
444namespace {
445at::Tensor & wrapper_SparseCUDA_out_div_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
446 // No device check
447 const OptionalDeviceGuard device_guard(device_of(self));
448 return at::native::div_out_sparse_zerodim(self, other, out);
449}
450} // anonymous namespace
451namespace {
452at::Tensor & wrapper_SparseCUDA_Tensor_div_(at::Tensor & self, const at::Tensor & other) {
453 // No device check
454 const OptionalDeviceGuard device_guard(device_of(self));
455 return at::native::div_sparse_(self, other);
456}
457} // anonymous namespace
458namespace {
459at::Tensor wrapper_SparseCUDA_Tensor_mode_div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
460 // No device check
461 const OptionalDeviceGuard device_guard(device_of(self));
462 return at::native::div_sparse(self, other, rounding_mode);
463}
464} // anonymous namespace
465namespace {
466at::Tensor & wrapper_SparseCUDA_out_mode_div_out(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
467 // No device check
468 const OptionalDeviceGuard device_guard(device_of(self));
469 return at::native::div_out_sparse_zerodim(self, other, rounding_mode, out);
470}
471} // anonymous namespace
472namespace {
473at::Tensor & wrapper_SparseCUDA_Tensor_mode_div_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
474 // No device check
475 const OptionalDeviceGuard device_guard(device_of(self));
476 return at::native::div_sparse_(self, other, rounding_mode);
477}
478} // anonymous namespace
479namespace {
480at::Tensor wrapper_SparseCUDA_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
481 c10::optional<Device> common_device = nullopt;
482(void)common_device; // Suppress unused variable warning
483 globalContext().lazyInitCUDA();
484 const DeviceGuard device_guard(device_or_default(device));
485 return at::native::empty_sparse(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
486}
487} // anonymous namespace
488namespace {
489at::Tensor wrapper_SparseCUDA__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
490 // No device check
491 // DeviceGuard omitted
492 return at::native::empty_like_sparse_coo(self, dtype, layout, device, pin_memory, memory_format);
493}
494} // anonymous namespace
495namespace {
496at::Tensor wrapper_SparseCUDA__erf(const at::Tensor & self) {
497 // No device check
498 const OptionalDeviceGuard device_guard(device_of(self));
499 return at::native::erf_sparse(self);
500}
501} // anonymous namespace
502namespace {
503at::Tensor & wrapper_SparseCUDA_out_erf_out(const at::Tensor & self, at::Tensor & out) {
504 // No device check
505 const OptionalDeviceGuard device_guard(device_of(self));
506 return at::native::erf_sparse_out(self, out);
507}
508} // anonymous namespace
509namespace {
510at::Tensor & wrapper_SparseCUDA__erf_(at::Tensor & self) {
511 // No device check
512 const OptionalDeviceGuard device_guard(device_of(self));
513 return at::native::erf_sparse_(self);
514}
515} // anonymous namespace
516namespace {
517at::Tensor wrapper_SparseCUDA__expm1(const at::Tensor & self) {
518 // No device check
519 const OptionalDeviceGuard device_guard(device_of(self));
520 return at::native::expm1_sparse(self);
521}
522} // anonymous namespace
523namespace {
524at::Tensor & wrapper_SparseCUDA_out_expm1_out(const at::Tensor & self, at::Tensor & out) {
525 // No device check
526 const OptionalDeviceGuard device_guard(device_of(self));
527 return at::native::expm1_sparse_out(self, out);
528}
529} // anonymous namespace
530namespace {
531at::Tensor & wrapper_SparseCUDA__expm1_(at::Tensor & self) {
532 // No device check
533 const OptionalDeviceGuard device_guard(device_of(self));
534 return at::native::expm1_sparse_(self);
535}
536} // anonymous namespace
537namespace {
538at::Tensor wrapper_SparseCUDA__floor(const at::Tensor & self) {
539 // No device check
540 const OptionalDeviceGuard device_guard(device_of(self));
541 return at::native::floor_sparse(self);
542}
543} // anonymous namespace
544namespace {
545at::Tensor & wrapper_SparseCUDA_out_floor_out(const at::Tensor & self, at::Tensor & out) {
546 // No device check
547 const OptionalDeviceGuard device_guard(device_of(self));
548 return at::native::floor_sparse_out(self, out);
549}
550} // anonymous namespace
551namespace {
552at::Tensor & wrapper_SparseCUDA__floor_(at::Tensor & self) {
553 // No device check
554 const OptionalDeviceGuard device_guard(device_of(self));
555 return at::native::floor_sparse_(self);
556}
557} // anonymous namespace
558namespace {
559at::Tensor wrapper_SparseCUDA__floor_divide(const at::Tensor & self, const at::Tensor & other) {
560 // No device check
561 const OptionalDeviceGuard device_guard(device_of(self));
562 return at::native::floor_divide_sparse(self, other);
563}
564} // anonymous namespace
565namespace {
566at::Tensor & wrapper_SparseCUDA_out_floor_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
567 // No device check
568 const OptionalDeviceGuard device_guard(device_of(self));
569 return at::native::floor_divide_out_sparse_zerodim(self, other, out);
570}
571} // anonymous namespace
572namespace {
573at::Tensor & wrapper_SparseCUDA_Tensor_floor_divide_(at::Tensor & self, const at::Tensor & other) {
574 // No device check
575 const OptionalDeviceGuard device_guard(device_of(self));
576 return at::native::floor_divide_sparse_(self, other);
577}
578} // anonymous namespace
579namespace {
580at::Tensor wrapper_SparseCUDA__frac(const at::Tensor & self) {
581 // No device check
582 const OptionalDeviceGuard device_guard(device_of(self));
583 return at::native::frac_sparse(self);
584}
585} // anonymous namespace
586namespace {
587at::Tensor & wrapper_SparseCUDA_out_frac_out(const at::Tensor & self, at::Tensor & out) {
588 // No device check
589 const OptionalDeviceGuard device_guard(device_of(self));
590 return at::native::frac_sparse_out(self, out);
591}
592} // anonymous namespace
593namespace {
594at::Tensor & wrapper_SparseCUDA__frac_(at::Tensor & self) {
595 // No device check
596 const OptionalDeviceGuard device_guard(device_of(self));
597 return at::native::frac_sparse_(self);
598}
599} // anonymous namespace
600namespace {
601at::Tensor wrapper_SparseCUDA__isnan(const at::Tensor & self) {
602 // No device check
603 // DeviceGuard omitted
604 return at::native::isnan_sparse(self);
605}
606} // anonymous namespace
607namespace {
608at::Tensor wrapper_SparseCUDA__nan_to_num(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
609 c10::optional<Device> common_device = nullopt;
610(void)common_device; // Suppress unused variable warning
611 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__nan_to_num", "self");
612 const OptionalDeviceGuard device_guard(device_of(self));
613 return at::native::nan_to_num_sparse(self, nan, posinf, neginf);
614}
615} // anonymous namespace
616namespace {
617at::Tensor & wrapper_SparseCUDA_out_nan_to_num_out(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
618 c10::optional<Device> common_device = nullopt;
619(void)common_device; // Suppress unused variable warning
620 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_nan_to_num_out", "out");
621 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_nan_to_num_out", "self");
622 const OptionalDeviceGuard device_guard(device_of(self));
623 return at::native::nan_to_num_sparse_out(self, nan, posinf, neginf, out);
624}
625} // anonymous namespace
626namespace {
627at::Tensor & wrapper_SparseCUDA__nan_to_num_(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
628 c10::optional<Device> common_device = nullopt;
629(void)common_device; // Suppress unused variable warning
630 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__nan_to_num_", "self");
631 const OptionalDeviceGuard device_guard(device_of(self));
632 return at::native::nan_to_num_sparse_(self, nan, posinf, neginf);
633}
634} // anonymous namespace
635namespace {
636at::Tensor wrapper_SparseCUDA__log1p(const at::Tensor & self) {
637 // No device check
638 const OptionalDeviceGuard device_guard(device_of(self));
639 return at::native::log1p_sparse(self);
640}
641} // anonymous namespace
642namespace {
643at::Tensor & wrapper_SparseCUDA_out_log1p_out(const at::Tensor & self, at::Tensor & out) {
644 // No device check
645 const OptionalDeviceGuard device_guard(device_of(self));
646 return at::native::log1p_sparse_out(self, out);
647}
648} // anonymous namespace
649namespace {
650at::Tensor & wrapper_SparseCUDA__log1p_(at::Tensor & self) {
651 // No device check
652 const OptionalDeviceGuard device_guard(device_of(self));
653 return at::native::log1p_sparse_(self);
654}
655} // anonymous namespace
656namespace {
657at::Tensor wrapper_SparseCUDA__mm(const at::Tensor & self, const at::Tensor & mat2) {
658 c10::optional<Device> common_device = nullopt;
659(void)common_device; // Suppress unused variable warning
660 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__mm", "self");
661 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__mm", "mat2");
662 const OptionalDeviceGuard device_guard(device_of(self));
663 return at::native::_sparse_mm(self, mat2);
664}
665} // anonymous namespace
666namespace {
667at::Tensor & wrapper_SparseCUDA_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
668 c10::optional<Device> common_device = nullopt;
669(void)common_device; // Suppress unused variable warning
670 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_mm_out", "out");
671 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_mm_out", "self");
672 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_mm_out", "mat2");
673 const OptionalDeviceGuard device_guard(device_of(self));
674 return at::native::_sparse_mm_out(self, mat2, out);
675}
676} // anonymous namespace
677namespace {
678at::Tensor wrapper_SparseCUDA___sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) {
679 c10::optional<Device> common_device = nullopt;
680(void)common_device; // Suppress unused variable warning
681 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_sparse_matmul", "self");
682 c10::impl::check_and_update_common_device(common_device, other, "wrapper_SparseCUDA___sparse_sparse_matmul", "other");
683 const OptionalDeviceGuard device_guard(device_of(self));
684 return at::native::sparse_sparse_matmul_cuda(self, other);
685}
686} // anonymous namespace
687namespace {
688at::Tensor wrapper_SparseCUDA_Tensor_mul(const at::Tensor & self, const at::Tensor & other) {
689 // No device check
690 const OptionalDeviceGuard device_guard(device_of(self));
691 return at::native::mul_sparse(self, other);
692}
693} // anonymous namespace
694namespace {
695at::Tensor & wrapper_SparseCUDA_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
696 // No device check
697 const OptionalDeviceGuard device_guard(device_of(self));
698 return at::native::mul_out_sparse_cuda(self, other, out);
699}
700} // anonymous namespace
701namespace {
702at::Tensor & wrapper_SparseCUDA_Tensor_mul_(at::Tensor & self, const at::Tensor & other) {
703 // No device check
704 const OptionalDeviceGuard device_guard(device_of(self));
705 return at::native::mul_sparse_(self, other);
706}
707} // anonymous namespace
708namespace {
709at::Tensor wrapper_SparseCUDA__mv(const at::Tensor & self, const at::Tensor & vec) {
710 c10::optional<Device> common_device = nullopt;
711(void)common_device; // Suppress unused variable warning
712 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__mv", "self");
713 c10::impl::check_and_update_common_device(common_device, vec, "wrapper_SparseCUDA__mv", "vec");
714 const OptionalDeviceGuard device_guard(device_of(self));
715 return at::native::mv_sparse(self, vec);
716}
717} // anonymous namespace
718namespace {
719at::Tensor wrapper_SparseCUDA__narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
720 c10::optional<Device> common_device = nullopt;
721(void)common_device; // Suppress unused variable warning
722 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__narrow_copy", "self");
723 const OptionalDeviceGuard device_guard(device_of(self));
724 return at::native::narrow_copy_sparse(self, dim, start.expect_int(), length.expect_int());
725}
726} // anonymous namespace
727namespace {
728at::Tensor wrapper_SparseCUDA__permute(const at::Tensor & self, at::IntArrayRef dims) {
729 c10::optional<Device> common_device = nullopt;
730(void)common_device; // Suppress unused variable warning
731 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__permute", "self");
732 const OptionalDeviceGuard device_guard(device_of(self));
733 return at::native::permute_sparse_coo(self, dims);
734}
735} // anonymous namespace
736namespace {
737at::Tensor wrapper_SparseCUDA__rad2deg(const at::Tensor & self) {
738 c10::optional<Device> common_device = nullopt;
739(void)common_device; // Suppress unused variable warning
740 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__rad2deg", "self");
741 const OptionalDeviceGuard device_guard(device_of(self));
742 return at::native::rad2deg_sparse(self);
743}
744} // anonymous namespace
745namespace {
746at::Tensor & wrapper_SparseCUDA_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) {
747 c10::optional<Device> common_device = nullopt;
748(void)common_device; // Suppress unused variable warning
749 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_rad2deg_out", "out");
750 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_rad2deg_out", "self");
751 const OptionalDeviceGuard device_guard(device_of(self));
752 return at::native::rad2deg_sparse_out(self, out);
753}
754} // anonymous namespace
755namespace {
756at::Tensor & wrapper_SparseCUDA__rad2deg_(at::Tensor & self) {
757 c10::optional<Device> common_device = nullopt;
758(void)common_device; // Suppress unused variable warning
759 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__rad2deg_", "self");
760 const OptionalDeviceGuard device_guard(device_of(self));
761 return at::native::rad2deg_sparse_(self);
762}
763} // anonymous namespace
764namespace {
765at::Tensor wrapper_SparseCUDA__deg2rad(const at::Tensor & self) {
766 c10::optional<Device> common_device = nullopt;
767(void)common_device; // Suppress unused variable warning
768 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__deg2rad", "self");
769 const OptionalDeviceGuard device_guard(device_of(self));
770 return at::native::deg2rad_sparse(self);
771}
772} // anonymous namespace
773namespace {
774at::Tensor & wrapper_SparseCUDA_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) {
775 c10::optional<Device> common_device = nullopt;
776(void)common_device; // Suppress unused variable warning
777 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_deg2rad_out", "out");
778 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_deg2rad_out", "self");
779 const OptionalDeviceGuard device_guard(device_of(self));
780 return at::native::deg2rad_sparse_out(self, out);
781}
782} // anonymous namespace
783namespace {
784at::Tensor & wrapper_SparseCUDA__deg2rad_(at::Tensor & self) {
785 c10::optional<Device> common_device = nullopt;
786(void)common_device; // Suppress unused variable warning
787 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__deg2rad_", "self");
788 const OptionalDeviceGuard device_guard(device_of(self));
789 return at::native::deg2rad_sparse_(self);
790}
791} // anonymous namespace
792namespace {
793at::Tensor wrapper_SparseCUDA__neg(const at::Tensor & self) {
794 // No device check
795 const OptionalDeviceGuard device_guard(device_of(self));
796 return at::native::neg_sparse(self);
797}
798} // anonymous namespace
799namespace {
800at::Tensor & wrapper_SparseCUDA_out_neg_out(const at::Tensor & self, at::Tensor & out) {
801 // No device check
802 const OptionalDeviceGuard device_guard(device_of(self));
803 return at::native::neg_out_sparse(self, out);
804}
805} // anonymous namespace
806namespace {
807at::Tensor & wrapper_SparseCUDA__neg_(at::Tensor & self) {
808 // No device check
809 const OptionalDeviceGuard device_guard(device_of(self));
810 return at::native::neg_sparse_(self);
811}
812} // anonymous namespace
813namespace {
814at::Tensor wrapper_SparseCUDA__round(const at::Tensor & self) {
815 // No device check
816 const OptionalDeviceGuard device_guard(device_of(self));
817 return at::native::round_sparse(self);
818}
819} // anonymous namespace
820namespace {
821at::Tensor & wrapper_SparseCUDA_out_round_out(const at::Tensor & self, at::Tensor & out) {
822 // No device check
823 const OptionalDeviceGuard device_guard(device_of(self));
824 return at::native::round_sparse_out(self, out);
825}
826} // anonymous namespace
827namespace {
828at::Tensor & wrapper_SparseCUDA__round_(at::Tensor & self) {
829 // No device check
830 const OptionalDeviceGuard device_guard(device_of(self));
831 return at::native::round_sparse_(self);
832}
833} // anonymous namespace
834namespace {
835at::Tensor wrapper_SparseCUDA__relu(const at::Tensor & self) {
836 // No device check
837 const OptionalDeviceGuard device_guard(device_of(self));
838 return at::native::relu_sparse(self);
839}
840} // anonymous namespace
841namespace {
842at::Tensor & wrapper_SparseCUDA__relu_(at::Tensor & self) {
843 // No device check
844 const OptionalDeviceGuard device_guard(device_of(self));
845 return at::native::relu_sparse_(self);
846}
847} // anonymous namespace
848namespace {
849at::Tensor wrapper_SparseCUDA__sin(const at::Tensor & self) {
850 // No device check
851 const OptionalDeviceGuard device_guard(device_of(self));
852 return at::native::sin_sparse(self);
853}
854} // anonymous namespace
855namespace {
856at::Tensor & wrapper_SparseCUDA_out_sin_out(const at::Tensor & self, at::Tensor & out) {
857 // No device check
858 const OptionalDeviceGuard device_guard(device_of(self));
859 return at::native::sin_sparse_out(self, out);
860}
861} // anonymous namespace
862namespace {
863at::Tensor & wrapper_SparseCUDA__sin_(at::Tensor & self) {
864 // No device check
865 const OptionalDeviceGuard device_guard(device_of(self));
866 return at::native::sin_sparse_(self);
867}
868} // anonymous namespace
869namespace {
870at::Tensor wrapper_SparseCUDA__sinh(const at::Tensor & self) {
871 // No device check
872 const OptionalDeviceGuard device_guard(device_of(self));
873 return at::native::sinh_sparse(self);
874}
875} // anonymous namespace
876namespace {
877at::Tensor & wrapper_SparseCUDA_out_sinh_out(const at::Tensor & self, at::Tensor & out) {
878 // No device check
879 const OptionalDeviceGuard device_guard(device_of(self));
880 return at::native::sinh_sparse_out(self, out);
881}
882} // anonymous namespace
883namespace {
884at::Tensor & wrapper_SparseCUDA__sinh_(at::Tensor & self) {
885 // No device check
886 const OptionalDeviceGuard device_guard(device_of(self));
887 return at::native::sinh_sparse_(self);
888}
889} // anonymous namespace
890namespace {
891at::Tensor & wrapper_SparseCUDA_out_sspaddmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
892 c10::optional<Device> common_device = nullopt;
893(void)common_device; // Suppress unused variable warning
894 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_sspaddmm_out", "out");
895 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_sspaddmm_out", "self");
896 c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA_out_sspaddmm_out", "mat1");
897 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_sspaddmm_out", "mat2");
898 const OptionalDeviceGuard device_guard(device_of(self));
899 return at::native::_sspaddmm_out_cuda(self, mat1, mat2, beta, alpha, out);
900}
901} // anonymous namespace
902namespace {
903at::Tensor wrapper_SparseCUDA__sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
904 // No device check
905 const OptionalDeviceGuard device_guard(device_of(self));
906 return at::native::sum_coo(self, dtype);
907}
908} // anonymous namespace
909namespace {
910at::Tensor wrapper_SparseCUDA_dim_IntList_sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
911 // No device check
912 const OptionalDeviceGuard device_guard(device_of(self));
913 return at::native::sum_sparse_coo(self, dim, keepdim, dtype);
914}
915} // anonymous namespace
916namespace {
917at::Tensor wrapper_SparseCUDA__sqrt(const at::Tensor & self) {
918 // No device check
919 const OptionalDeviceGuard device_guard(device_of(self));
920 return at::native::sqrt_sparse(self);
921}
922} // anonymous namespace
923namespace {
924at::Tensor & wrapper_SparseCUDA_out_sqrt_out(const at::Tensor & self, at::Tensor & out) {
925 // No device check
926 const OptionalDeviceGuard device_guard(device_of(self));
927 return at::native::sqrt_sparse_out(self, out);
928}
929} // anonymous namespace
930namespace {
931at::Tensor & wrapper_SparseCUDA__sqrt_(at::Tensor & self) {
932 // No device check
933 const OptionalDeviceGuard device_guard(device_of(self));
934 return at::native::sqrt_sparse_(self);
935}
936} // anonymous namespace
937namespace {
938at::Tensor wrapper_SparseCUDA__tan(const at::Tensor & self) {
939 // No device check
940 const OptionalDeviceGuard device_guard(device_of(self));
941 return at::native::tan_sparse(self);
942}
943} // anonymous namespace
944namespace {
945at::Tensor & wrapper_SparseCUDA_out_tan_out(const at::Tensor & self, at::Tensor & out) {
946 // No device check
947 const OptionalDeviceGuard device_guard(device_of(self));
948 return at::native::tan_sparse_out(self, out);
949}
950} // anonymous namespace
951namespace {
952at::Tensor & wrapper_SparseCUDA__tan_(at::Tensor & self) {
953 // No device check
954 const OptionalDeviceGuard device_guard(device_of(self));
955 return at::native::tan_sparse_(self);
956}
957} // anonymous namespace
958namespace {
959at::Tensor wrapper_SparseCUDA__tanh(const at::Tensor & self) {
960 // No device check
961 const OptionalDeviceGuard device_guard(device_of(self));
962 return at::native::tanh_sparse(self);
963}
964} // anonymous namespace
965namespace {
966at::Tensor & wrapper_SparseCUDA_out_tanh_out(const at::Tensor & self, at::Tensor & out) {
967 // No device check
968 const OptionalDeviceGuard device_guard(device_of(self));
969 return at::native::tanh_sparse_out(self, out);
970}
971} // anonymous namespace
972namespace {
973at::Tensor & wrapper_SparseCUDA__tanh_(at::Tensor & self) {
974 // No device check
975 const OptionalDeviceGuard device_guard(device_of(self));
976 return at::native::tanh_sparse_(self);
977}
978} // anonymous namespace
979namespace {
980at::Tensor wrapper_SparseCUDA__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
981 c10::optional<Device> common_device = nullopt;
982(void)common_device; // Suppress unused variable warning
983 c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCUDA__threshold_backward", "grad_output");
984 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__threshold_backward", "self");
985 const OptionalDeviceGuard device_guard(device_of(self));
986 return at::native::threshold_backward_sparse(grad_output, self, threshold);
987}
988} // anonymous namespace
989namespace {
990at::Tensor & wrapper_SparseCUDA_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
991 c10::optional<Device> common_device = nullopt;
992(void)common_device; // Suppress unused variable warning
993 c10::impl::check_and_update_common_device(common_device, grad_input, "wrapper_SparseCUDA_grad_input_threshold_backward_out", "grad_input");
994 c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCUDA_grad_input_threshold_backward_out", "grad_output");
995 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_grad_input_threshold_backward_out", "self");
996 const OptionalDeviceGuard device_guard(device_of(self));
997 return at::native::threshold_backward_sparse_out(grad_output, self, threshold, grad_input);
998}
999} // anonymous namespace
1000namespace {
1001at::Tensor wrapper_SparseCUDA__trunc(const at::Tensor & self) {
1002 // No device check
1003 const OptionalDeviceGuard device_guard(device_of(self));
1004 return at::native::trunc_sparse(self);
1005}
1006} // anonymous namespace
1007namespace {
1008at::Tensor & wrapper_SparseCUDA_out_trunc_out(const at::Tensor & self, at::Tensor & out) {
1009 // No device check
1010 const OptionalDeviceGuard device_guard(device_of(self));
1011 return at::native::trunc_sparse_out(self, out);
1012}
1013} // anonymous namespace
1014namespace {
1015at::Tensor & wrapper_SparseCUDA__trunc_(at::Tensor & self) {
1016 // No device check
1017 const OptionalDeviceGuard device_guard(device_of(self));
1018 return at::native::trunc_sparse_(self);
1019}
1020} // anonymous namespace
1021namespace {
1022at::Tensor wrapper_SparseCUDA__unsqueeze(const at::Tensor & self, int64_t dim) {
1023 // No device check
1024 // DeviceGuard omitted
1025 return at::native::unsqueeze_sparse(self, dim);
1026}
1027} // anonymous namespace
1028namespace {
1029at::Tensor & wrapper_SparseCUDA_out_zeros_out(c10::SymIntArrayRef size, at::Tensor & out) {
1030 c10::optional<Device> common_device = nullopt;
1031(void)common_device; // Suppress unused variable warning
1032 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_zeros_out", "out");
1033 const OptionalDeviceGuard device_guard(device_of(out));
1034 return at::native::zeros_sparse_out(C10_AS_INTARRAYREF_SLOW(size), out);
1035}
1036} // anonymous namespace
1037namespace {
1038at::Tensor wrapper_SparseCUDA__native_norm(const at::Tensor & self, const at::Scalar & p) {
1039 c10::optional<Device> common_device = nullopt;
1040(void)common_device; // Suppress unused variable warning
1041 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__native_norm", "self");
1042 const OptionalDeviceGuard device_guard(device_of(self));
1043 return at::native::norm_sparse(self, p);
1044}
1045} // anonymous namespace
1046namespace {
1047at::Tensor wrapper_SparseCUDA_ScalarOpt_dim_dtype_native_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
1048 c10::optional<Device> common_device = nullopt;
1049(void)common_device; // Suppress unused variable warning
1050 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_ScalarOpt_dim_dtype_native_norm", "self");
1051 const OptionalDeviceGuard device_guard(device_of(self));
1052 return at::native::norm_sparse(self, p, dim, keepdim, dtype);
1053}
1054} // anonymous namespace
1055namespace {
1056at::Tensor wrapper_SparseCUDA___sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
1057 c10::optional<Device> common_device = nullopt;
1058(void)common_device; // Suppress unused variable warning
1059 c10::impl::check_and_update_common_device(common_device, grad, "wrapper_SparseCUDA___sparse_sum_backward", "grad");
1060 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_sum_backward", "self");
1061 const OptionalDeviceGuard device_guard(device_of(self));
1062 return at::native::_sparse_sum_backward_cuda(grad, self, dim);
1063}
1064} // anonymous namespace
1065namespace {
1066at::Tensor wrapper_SparseCUDA___sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
1067 c10::optional<Device> common_device = nullopt;
1068(void)common_device; // Suppress unused variable warning
1069 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_softmax", "self");
1070 const OptionalDeviceGuard device_guard(device_of(self));
1071 return at::native::softmax_sparse_cuda(self, dim, half_to_float);
1072}
1073} // anonymous namespace
1074namespace {
1075at::Tensor wrapper_SparseCUDA___sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
1076 c10::optional<Device> common_device = nullopt;
1077(void)common_device; // Suppress unused variable warning
1078 c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCUDA___sparse_softmax_backward_data", "grad_output");
1079 c10::impl::check_and_update_common_device(common_device, output, "wrapper_SparseCUDA___sparse_softmax_backward_data", "output");
1080 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_softmax_backward_data", "self");
1081 const OptionalDeviceGuard device_guard(device_of(self));
1082 return at::native::softmax_backward_sparse_cuda(grad_output, output, dim, self);
1083}
1084} // anonymous namespace
1085namespace {
1086at::Tensor wrapper_SparseCUDA___sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
1087 c10::optional<Device> common_device = nullopt;
1088(void)common_device; // Suppress unused variable warning
1089 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_log_softmax", "self");
1090 const OptionalDeviceGuard device_guard(device_of(self));
1091 return at::native::log_softmax_sparse_cuda(self, dim, half_to_float);
1092}
1093} // anonymous namespace
1094namespace {
1095at::Tensor wrapper_SparseCUDA___sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
1096 c10::optional<Device> common_device = nullopt;
1097(void)common_device; // Suppress unused variable warning
1098 c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCUDA___sparse_log_softmax_backward_data", "grad_output");
1099 c10::impl::check_and_update_common_device(common_device, output, "wrapper_SparseCUDA___sparse_log_softmax_backward_data", "output");
1100 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_log_softmax_backward_data", "self");
1101 const OptionalDeviceGuard device_guard(device_of(self));
1102 return at::native::log_softmax_backward_sparse_cuda(grad_output, output, dim, self);
1103}
1104} // anonymous namespace
1105namespace {
1106at::Tensor wrapper_SparseCUDA_ScalarOpt_dim_dtype_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
1107 // No device check
1108 const OptionalDeviceGuard device_guard(device_of(self));
1109 return at::native::sparse_dtype_norm(self, p, dim, keepdim, dtype);
1110}
1111} // anonymous namespace
1112namespace {
1113at::Tensor wrapper_SparseCUDA_ScalarOpt_dim_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
1114 // No device check
1115 const OptionalDeviceGuard device_guard(device_of(self));
1116 return at::native::sparse_norm(self, p, dim, keepdim);
1117}
1118} // anonymous namespace
1119namespace {
1120at::Tensor wrapper_SparseCUDA__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
1121 c10::optional<Device> common_device = nullopt;
1122(void)common_device; // Suppress unused variable warning
1123 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__clone", "self");
1124 const OptionalDeviceGuard device_guard(device_of(self));
1125 return at::native::clone_sparse(self, memory_format);
1126}
1127} // anonymous namespace
1128namespace {
1129const at::Tensor & wrapper_SparseCUDA__resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
1130 c10::optional<Device> common_device = nullopt;
1131(void)common_device; // Suppress unused variable warning
1132 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__resize_as_sparse_", "self");
1133 c10::impl::check_and_update_common_device(common_device, the_template, "wrapper_SparseCUDA__resize_as_sparse_", "the_template");
1134 const OptionalDeviceGuard device_guard(device_of(self));
1135 return at::native::resize_as_sparse_(self, the_template);
1136}
1137} // anonymous namespace
1138namespace {
1139at::Tensor & wrapper_SparseCUDA__zero_(at::Tensor & self) {
1140 // No device check
1141 const OptionalDeviceGuard device_guard(device_of(self));
1142 return at::native::zero_sparse_(self);
1143}
1144} // anonymous namespace
1145namespace {
1146at::Tensor wrapper_SparseCUDA_Tensor_sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1147 // No device check
1148 const OptionalDeviceGuard device_guard(device_of(self));
1149 return at::native::sub_sparse(self, other, alpha);
1150}
1151} // anonymous namespace
1152namespace {
1153at::Tensor & wrapper_SparseCUDA_out_sub_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
1154 // No device check
1155 const OptionalDeviceGuard device_guard(device_of(self));
1156 return at::native::sub_out_sparse(self, other, alpha, out);
1157}
1158} // anonymous namespace
1159namespace {
1160at::Tensor & wrapper_SparseCUDA_Tensor_sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1161 // No device check
1162 const OptionalDeviceGuard device_guard(device_of(self));
1163 return at::native::sub_sparse_(self, other, alpha);
1164}
1165} // anonymous namespace
1166namespace {
1167at::Tensor wrapper_SparseCUDA__addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
1168 c10::optional<Device> common_device = nullopt;
1169(void)common_device; // Suppress unused variable warning
1170 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__addmm", "self");
1171 c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA__addmm", "mat1");
1172 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__addmm", "mat2");
1173 const OptionalDeviceGuard device_guard(device_of(self));
1174 return at::native::addmm_sparse_dense_cuda(self, mat1, mat2, beta, alpha);
1175}
1176} // anonymous namespace
1177namespace {
1178at::Tensor & wrapper_SparseCUDA_out_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
1179 c10::optional<Device> common_device = nullopt;
1180(void)common_device; // Suppress unused variable warning
1181 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_addmm_out", "out");
1182 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_addmm_out", "self");
1183 c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA_out_addmm_out", "mat1");
1184 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_addmm_out", "mat2");
1185 const OptionalDeviceGuard device_guard(device_of(self));
1186 return at::native::addmm_out_sparse_dense_cuda(self, mat1, mat2, beta, alpha, out);
1187}
1188} // anonymous namespace
1189namespace {
1190at::Tensor & wrapper_SparseCUDA__addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
1191 c10::optional<Device> common_device = nullopt;
1192(void)common_device; // Suppress unused variable warning
1193 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__addmm_", "self");
1194 c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA__addmm_", "mat1");
1195 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__addmm_", "mat2");
1196 const OptionalDeviceGuard device_guard(device_of(self));
1197 return at::native::s_addmm_sparse_dense_cuda_(self, mat1, mat2, beta, alpha);
1198}
1199} // anonymous namespace
1200namespace {
1201at::Tensor wrapper_SparseCUDA___sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
1202 c10::optional<Device> common_device = nullopt;
1203(void)common_device; // Suppress unused variable warning
1204 globalContext().lazyInitCUDA();
1205 const DeviceGuard device_guard(device_or_default(device));
1206 return at::native::new_with_dims_sparse(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
1207}
1208} // anonymous namespace
1209namespace {
1210at::Tensor wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
1211 c10::optional<Device> common_device = nullopt;
1212(void)common_device; // Suppress unused variable warning
1213 c10::impl::check_and_update_common_device(common_device, indices, "wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors", "indices");
1214 c10::impl::check_and_update_common_device(common_device, values, "wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors", "values");
1215 globalContext().lazyInitCUDA();
1216 const DeviceGuard device_guard(device_or_default(device));
1217 return at::native::new_with_dims_and_tensor_sparse_symint(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
1218}
1219} // anonymous namespace
1220namespace {
1221const at::Tensor & wrapper_SparseCUDA__sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
1222 c10::optional<Device> common_device = nullopt;
1223(void)common_device; // Suppress unused variable warning
1224 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sparse_resize_", "self");
1225 const OptionalDeviceGuard device_guard(device_of(self));
1226 return at::native::sparse_resize_(self, size, sparse_dim, dense_dim);
1227}
1228} // anonymous namespace
1229namespace {
1230const at::Tensor & wrapper_SparseCUDA__sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
1231 c10::optional<Device> common_device = nullopt;
1232(void)common_device; // Suppress unused variable warning
1233 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sparse_resize_and_clear_", "self");
1234 const OptionalDeviceGuard device_guard(device_of(self));
1235 return at::native::sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
1236}
1237} // anonymous namespace
1238namespace {
1239at::Tensor wrapper_SparseCUDA__sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
1240 c10::optional<Device> common_device = nullopt;
1241(void)common_device; // Suppress unused variable warning
1242 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sparse_mask", "self");
1243 c10::impl::check_and_update_common_device(common_device, mask, "wrapper_SparseCUDA__sparse_mask", "mask");
1244 const OptionalDeviceGuard device_guard(device_of(self));
1245 return at::native::sparse_mask(self, mask);
1246}
1247} // anonymous namespace
1248namespace {
1249at::Tensor wrapper_SparseCUDA___to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
1250 c10::optional<Device> common_device = nullopt;
1251(void)common_device; // Suppress unused variable warning
1252 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___to_dense", "self");
1253 const OptionalDeviceGuard device_guard(device_of(self));
1254 return at::native::sparse_to_dense(self, dtype);
1255}
1256} // anonymous namespace
1257namespace {
1258int64_t wrapper_SparseCUDA__sparse_dim(const at::Tensor & self) {
1259 // No device check
1260 // DeviceGuard omitted
1261 return at::native::sparse_dim_sparse(self);
1262}
1263} // anonymous namespace
1264namespace {
1265int64_t wrapper_SparseCUDA___dimI(const at::Tensor & self) {
1266 // No device check
1267 // DeviceGuard omitted
1268 return at::native::sparse_dim_sparse(self);
1269}
1270} // anonymous namespace
1271namespace {
1272int64_t wrapper_SparseCUDA__dense_dim(const at::Tensor & self) {
1273 // No device check
1274 // DeviceGuard omitted
1275 return at::native::dense_dim_sparse(self);
1276}
1277} // anonymous namespace
1278namespace {
1279int64_t wrapper_SparseCUDA___dimV(const at::Tensor & self) {
1280 // No device check
1281 // DeviceGuard omitted
1282 return at::native::dense_dim_sparse(self);
1283}
1284} // anonymous namespace
1285namespace {
1286int64_t wrapper_SparseCUDA___nnz(const at::Tensor & self) {
1287 // No device check
1288 // DeviceGuard omitted
1289 return at::native::_nnz_sparse(self);
1290}
1291} // anonymous namespace
1292namespace {
1293at::Tensor wrapper_SparseCUDA___coalesce(const at::Tensor & self) {
1294 c10::optional<Device> common_device = nullopt;
1295(void)common_device; // Suppress unused variable warning
1296 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___coalesce", "self");
1297 const OptionalDeviceGuard device_guard(device_of(self));
1298 return at::native::_coalesce_sparse_cuda(self);
1299}
1300} // anonymous namespace
1301namespace {
1302bool wrapper_SparseCUDA__is_coalesced(const at::Tensor & self) {
1303 // No device check
1304 // DeviceGuard omitted
1305 return at::native::is_coalesced_sparse(self);
1306}
1307} // anonymous namespace
1308namespace {
1309at::Tensor wrapper_SparseCUDA___indices(const at::Tensor & self) {
1310 // No device check
1311 // DeviceGuard omitted
1312 return at::native::_indices_sparse(self);
1313}
1314} // anonymous namespace
1315namespace {
1316at::Tensor wrapper_SparseCUDA___values(const at::Tensor & self) {
1317 // No device check
1318 // DeviceGuard omitted
1319 return at::native::_values_sparse(self);
1320}
1321} // anonymous namespace
1322namespace {
1323at::Tensor & wrapper_SparseCUDA___coalesced_(at::Tensor & self, bool coalesced) {
1324 // No device check
1325 // DeviceGuard omitted
1326 return at::native::_coalesced_sparse_(self, coalesced);
1327}
1328} // anonymous namespace
1329namespace {
1330at::Tensor wrapper_SparseCUDA__indices(const at::Tensor & self) {
1331 // No device check
1332 // DeviceGuard omitted
1333 return at::native::indices_sparse(self);
1334}
1335} // anonymous namespace
1336namespace {
1337at::Tensor wrapper_SparseCUDA__values(const at::Tensor & self) {
1338 // No device check
1339 // DeviceGuard omitted
1340 return at::native::values_sparse(self);
1341}
1342} // anonymous namespace
1343namespace {
1344at::Tensor wrapper_SparseCUDA__hspmm(const at::Tensor & mat1, const at::Tensor & mat2) {
1345 c10::optional<Device> common_device = nullopt;
1346(void)common_device; // Suppress unused variable warning
1347 c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA__hspmm", "mat1");
1348 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__hspmm", "mat2");
1349 const OptionalDeviceGuard device_guard(device_of(mat1));
1350 return at::native::hspmm_sparse_cuda(mat1, mat2);
1351}
1352} // anonymous namespace
1353namespace {
1354at::Tensor & wrapper_SparseCUDA_out_hspmm_out(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
1355 c10::optional<Device> common_device = nullopt;
1356(void)common_device; // Suppress unused variable warning
1357 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_hspmm_out", "out");
1358 c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA_out_hspmm_out", "mat1");
1359 c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_hspmm_out", "mat2");
1360 const OptionalDeviceGuard device_guard(device_of(out));
1361 return at::native::hspmm_out_sparse_cuda(mat1, mat2, out);
1362}
1363} // anonymous namespace
1364namespace {
1365at::Tensor & wrapper_SparseCUDA__copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
1366 // No device check
1367 const OptionalDeviceGuard device_guard(device_of(self));
1368 return at::native::copy_sparse_(self, src, non_blocking);
1369}
1370} // anonymous namespace
1371namespace {
1372at::Tensor wrapper_SparseCUDA_sparse_dim_to_sparse(const at::Tensor & self, int64_t sparse_dim) {
1373 c10::optional<Device> common_device = nullopt;
1374(void)common_device; // Suppress unused variable warning
1375 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_sparse_dim_to_sparse", "self");
1376 const OptionalDeviceGuard device_guard(device_of(self));
1377 return at::native::sparse_coo_to_sparse(self, sparse_dim);
1378}
1379} // anonymous namespace
1380namespace {
1381at::Tensor wrapper_SparseCUDA__to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1382 c10::optional<Device> common_device = nullopt;
1383(void)common_device; // Suppress unused variable warning
1384 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse", "self");
1385 const OptionalDeviceGuard device_guard(device_of(self));
1386 return at::native::sparse_coo_to_sparse(self, layout, blocksize, dense_dim);
1387}
1388} // anonymous namespace
1389namespace {
1390at::Tensor wrapper_SparseCUDA__to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
1391 c10::optional<Device> common_device = nullopt;
1392(void)common_device; // Suppress unused variable warning
1393 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse_csr", "self");
1394 const OptionalDeviceGuard device_guard(device_of(self));
1395 return at::native::coo_to_sparse_csr(self, dense_dim);
1396}
1397} // anonymous namespace
1398namespace {
1399at::Tensor wrapper_SparseCUDA__to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
1400 c10::optional<Device> common_device = nullopt;
1401(void)common_device; // Suppress unused variable warning
1402 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse_csc", "self");
1403 const OptionalDeviceGuard device_guard(device_of(self));
1404 return at::native::coo_to_sparse_csc(self, dense_dim);
1405}
1406} // anonymous namespace
1407namespace {
1408at::Tensor wrapper_SparseCUDA__to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1409 c10::optional<Device> common_device = nullopt;
1410(void)common_device; // Suppress unused variable warning
1411 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse_bsr", "self");
1412 const OptionalDeviceGuard device_guard(device_of(self));
1413 return at::native::coo_to_sparse_bsr(self, blocksize, dense_dim);
1414}
1415} // anonymous namespace
1416namespace {
1417at::Tensor wrapper_SparseCUDA__to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1418 c10::optional<Device> common_device = nullopt;
1419(void)common_device; // Suppress unused variable warning
1420 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse_bsc", "self");
1421 const OptionalDeviceGuard device_guard(device_of(self));
1422 return at::native::coo_to_sparse_bsc(self, blocksize, dense_dim);
1423}
1424} // anonymous namespace
1425namespace {
1426at::Tensor wrapper_SparseCUDA__index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
1427 c10::optional<Device> common_device = nullopt;
1428(void)common_device; // Suppress unused variable warning
1429 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__index_select", "self");
1430 c10::impl::check_and_update_common_device(common_device, index, "wrapper_SparseCUDA__index_select", "index");
1431 const OptionalDeviceGuard device_guard(device_of(self));
1432 return at::native::index_select_sparse_cuda(self, dim, index);
1433}
1434} // anonymous namespace
1435namespace {
1436at::Tensor wrapper_SparseCUDA__erfinv(const at::Tensor & self) {
1437 // No device check
1438 const OptionalDeviceGuard device_guard(device_of(self));
1439 return at::native::erfinv_sparse(self);
1440}
1441} // anonymous namespace
1442namespace {
1443at::Tensor & wrapper_SparseCUDA_out_erfinv_out(const at::Tensor & self, at::Tensor & out) {
1444 // No device check
1445 const OptionalDeviceGuard device_guard(device_of(self));
1446 return at::native::erfinv_sparse_out(self, out);
1447}
1448} // anonymous namespace
1449namespace {
1450at::Tensor & wrapper_SparseCUDA__erfinv_(at::Tensor & self) {
1451 // No device check
1452 const OptionalDeviceGuard device_guard(device_of(self));
1453 return at::native::erfinv_sparse_(self);
1454}
1455} // anonymous namespace
1456namespace {
1457at::Tensor wrapper_SparseCUDA__sign(const at::Tensor & self) {
1458 // No device check
1459 const OptionalDeviceGuard device_guard(device_of(self));
1460 return at::native::sign_sparse(self);
1461}
1462} // anonymous namespace
1463namespace {
1464at::Tensor & wrapper_SparseCUDA_out_sign_out(const at::Tensor & self, at::Tensor & out) {
1465 // No device check
1466 const OptionalDeviceGuard device_guard(device_of(self));
1467 return at::native::sign_sparse_out(self, out);
1468}
1469} // anonymous namespace
1470namespace {
1471at::Tensor & wrapper_SparseCUDA__sign_(at::Tensor & self) {
1472 // No device check
1473 const OptionalDeviceGuard device_guard(device_of(self));
1474 return at::native::sign_sparse_(self);
1475}
1476} // anonymous namespace
1477namespace {
1478at::Tensor wrapper_SparseCUDA__signbit(const at::Tensor & self) {
1479 c10::optional<Device> common_device = nullopt;
1480(void)common_device; // Suppress unused variable warning
1481 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__signbit", "self");
1482 const OptionalDeviceGuard device_guard(device_of(self));
1483 return at::native::signbit_sparse(self);
1484}
1485} // anonymous namespace
1486namespace {
1487at::Tensor & wrapper_SparseCUDA_out_signbit_out(const at::Tensor & self, at::Tensor & out) {
1488 c10::optional<Device> common_device = nullopt;
1489(void)common_device; // Suppress unused variable warning
1490 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_signbit_out", "out");
1491 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_signbit_out", "self");
1492 const OptionalDeviceGuard device_guard(device_of(self));
1493 return at::native::signbit_sparse_out(self, out);
1494}
1495} // anonymous namespace
1496namespace {
1497at::Tensor wrapper_SparseCUDA__any(const at::Tensor & self) {
1498 // No device check
1499 const OptionalDeviceGuard device_guard(device_of(self));
1500 return at::native::any_sparse(self);
1501}
1502} // anonymous namespace
1503namespace {
1504at::Tensor wrapper_SparseCUDA_Tensor_Scalar_pow(const at::Tensor & self, const at::Scalar & exponent) {
1505 // No device check
1506 const OptionalDeviceGuard device_guard(device_of(self));
1507 return at::native::pow_sparse_scalar(self, exponent);
1508}
1509} // anonymous namespace
1510namespace {
1511at::Tensor & wrapper_SparseCUDA_Tensor_Scalar_out_pow_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
1512 // No device check
1513 const OptionalDeviceGuard device_guard(device_of(self));
1514 return at::native::pow_out_sparse_scalar(self, exponent, out);
1515}
1516} // anonymous namespace
1517namespace {
1518at::Tensor wrapper_SparseCUDA__isinf(const at::Tensor & self) {
1519 // No device check
1520 // DeviceGuard omitted
1521 return at::native::isinf_sparse(self);
1522}
1523} // anonymous namespace
1524namespace {
1525at::Tensor wrapper_SparseCUDA__isposinf(const at::Tensor & self) {
1526 c10::optional<Device> common_device = nullopt;
1527(void)common_device; // Suppress unused variable warning
1528 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__isposinf", "self");
1529 const OptionalDeviceGuard device_guard(device_of(self));
1530 return at::native::isposinf_sparse(self);
1531}
1532} // anonymous namespace
1533namespace {
1534at::Tensor & wrapper_SparseCUDA_out_isposinf_out(const at::Tensor & self, at::Tensor & out) {
1535 c10::optional<Device> common_device = nullopt;
1536(void)common_device; // Suppress unused variable warning
1537 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_isposinf_out", "out");
1538 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_isposinf_out", "self");
1539 const OptionalDeviceGuard device_guard(device_of(self));
1540 return at::native::isposinf_sparse_out(self, out);
1541}
1542} // anonymous namespace
1543namespace {
1544at::Tensor wrapper_SparseCUDA__isneginf(const at::Tensor & self) {
1545 c10::optional<Device> common_device = nullopt;
1546(void)common_device; // Suppress unused variable warning
1547 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__isneginf", "self");
1548 const OptionalDeviceGuard device_guard(device_of(self));
1549 return at::native::isneginf_sparse(self);
1550}
1551} // anonymous namespace
1552namespace {
1553at::Tensor & wrapper_SparseCUDA_out_isneginf_out(const at::Tensor & self, at::Tensor & out) {
1554 c10::optional<Device> common_device = nullopt;
1555(void)common_device; // Suppress unused variable warning
1556 c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_isneginf_out", "out");
1557 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_isneginf_out", "self");
1558 const OptionalDeviceGuard device_guard(device_of(self));
1559 return at::native::isneginf_sparse_out(self, out);
1560}
1561} // anonymous namespace
1562TORCH_LIBRARY_IMPL(aten, SparseCUDA, m) {
1563 m.impl("abs",
1564TORCH_FN(wrapper_SparseCUDA__abs));
1565m.impl("abs.out",
1566TORCH_FN(wrapper_SparseCUDA_out_abs_out));
1567m.impl("abs_",
1568TORCH_FN(wrapper_SparseCUDA__abs_));
1569m.impl("sgn",
1570TORCH_FN(wrapper_SparseCUDA__sgn));
1571m.impl("sgn.out",
1572TORCH_FN(wrapper_SparseCUDA_out_sgn_out));
1573m.impl("sgn_",
1574TORCH_FN(wrapper_SparseCUDA__sgn_));
1575m.impl("conj_physical.out",
1576TORCH_FN(wrapper_SparseCUDA_out_conj_physical_out));
1577m.impl("add.Tensor",
1578TORCH_FN(wrapper_SparseCUDA_Tensor_add));
1579m.impl("add.out",
1580TORCH_FN(wrapper_SparseCUDA_out_add_out));
1581m.impl("add_.Tensor",
1582TORCH_FN(wrapper_SparseCUDA_Tensor_add_));
1583m.impl("asinh",
1584TORCH_FN(wrapper_SparseCUDA__asinh));
1585m.impl("asinh.out",
1586TORCH_FN(wrapper_SparseCUDA_out_asinh_out));
1587m.impl("asinh_",
1588TORCH_FN(wrapper_SparseCUDA__asinh_));
1589m.impl("atanh",
1590TORCH_FN(wrapper_SparseCUDA__atanh));
1591m.impl("atanh.out",
1592TORCH_FN(wrapper_SparseCUDA_out_atanh_out));
1593m.impl("atanh_",
1594TORCH_FN(wrapper_SparseCUDA__atanh_));
1595m.impl("asin",
1596TORCH_FN(wrapper_SparseCUDA__asin));
1597m.impl("asin.out",
1598TORCH_FN(wrapper_SparseCUDA_out_asin_out));
1599m.impl("asin_",
1600TORCH_FN(wrapper_SparseCUDA__asin_));
1601m.impl("atan",
1602TORCH_FN(wrapper_SparseCUDA__atan));
1603m.impl("atan.out",
1604TORCH_FN(wrapper_SparseCUDA_out_atan_out));
1605m.impl("atan_",
1606TORCH_FN(wrapper_SparseCUDA__atan_));
1607m.impl("bmm",
1608TORCH_FN(wrapper_SparseCUDA__bmm));
1609m.impl("bmm.out",
1610TORCH_FN(wrapper_SparseCUDA_out_bmm_out));
1611m.impl("_sparse_broadcast_to",
1612TORCH_FN(wrapper_SparseCUDA___sparse_broadcast_to));
1613m.impl("cat",
1614TORCH_FN(wrapper_SparseCUDA__cat));
1615m.impl("ceil",
1616TORCH_FN(wrapper_SparseCUDA__ceil));
1617m.impl("ceil.out",
1618TORCH_FN(wrapper_SparseCUDA_out_ceil_out));
1619m.impl("ceil_",
1620TORCH_FN(wrapper_SparseCUDA__ceil_));
1621m.impl("copy_",
1622TORCH_FN(wrapper_SparseCUDA__copy_));
1623m.impl("div.Tensor",
1624TORCH_FN(wrapper_SparseCUDA_Tensor_div));
1625m.impl("div.out",
1626TORCH_FN(wrapper_SparseCUDA_out_div_out));
1627m.impl("div_.Tensor",
1628TORCH_FN(wrapper_SparseCUDA_Tensor_div_));
1629m.impl("div.Tensor_mode",
1630TORCH_FN(wrapper_SparseCUDA_Tensor_mode_div));
1631m.impl("div.out_mode",
1632TORCH_FN(wrapper_SparseCUDA_out_mode_div_out));
1633m.impl("div_.Tensor_mode",
1634TORCH_FN(wrapper_SparseCUDA_Tensor_mode_div_));
1635m.impl("empty.memory_format",
1636TORCH_FN(wrapper_SparseCUDA_memory_format_empty));
1637m.impl("empty_like",
1638TORCH_FN(wrapper_SparseCUDA__empty_like));
1639m.impl("erf",
1640TORCH_FN(wrapper_SparseCUDA__erf));
1641m.impl("erf.out",
1642TORCH_FN(wrapper_SparseCUDA_out_erf_out));
1643m.impl("erf_",
1644TORCH_FN(wrapper_SparseCUDA__erf_));
1645m.impl("expm1",
1646TORCH_FN(wrapper_SparseCUDA__expm1));
1647m.impl("expm1.out",
1648TORCH_FN(wrapper_SparseCUDA_out_expm1_out));
1649m.impl("expm1_",
1650TORCH_FN(wrapper_SparseCUDA__expm1_));
1651m.impl("floor",
1652TORCH_FN(wrapper_SparseCUDA__floor));
1653m.impl("floor.out",
1654TORCH_FN(wrapper_SparseCUDA_out_floor_out));
1655m.impl("floor_",
1656TORCH_FN(wrapper_SparseCUDA__floor_));
1657m.impl("floor_divide",
1658TORCH_FN(wrapper_SparseCUDA__floor_divide));
1659m.impl("floor_divide.out",
1660TORCH_FN(wrapper_SparseCUDA_out_floor_divide_out));
1661m.impl("floor_divide_.Tensor",
1662TORCH_FN(wrapper_SparseCUDA_Tensor_floor_divide_));
1663m.impl("frac",
1664TORCH_FN(wrapper_SparseCUDA__frac));
1665m.impl("frac.out",
1666TORCH_FN(wrapper_SparseCUDA_out_frac_out));
1667m.impl("frac_",
1668TORCH_FN(wrapper_SparseCUDA__frac_));
1669m.impl("isnan",
1670TORCH_FN(wrapper_SparseCUDA__isnan));
1671m.impl("nan_to_num",
1672TORCH_FN(wrapper_SparseCUDA__nan_to_num));
1673m.impl("nan_to_num.out",
1674TORCH_FN(wrapper_SparseCUDA_out_nan_to_num_out));
1675m.impl("nan_to_num_",
1676TORCH_FN(wrapper_SparseCUDA__nan_to_num_));
1677m.impl("log1p",
1678TORCH_FN(wrapper_SparseCUDA__log1p));
1679m.impl("log1p.out",
1680TORCH_FN(wrapper_SparseCUDA_out_log1p_out));
1681m.impl("log1p_",
1682TORCH_FN(wrapper_SparseCUDA__log1p_));
1683m.impl("mm",
1684TORCH_FN(wrapper_SparseCUDA__mm));
1685m.impl("mm.out",
1686TORCH_FN(wrapper_SparseCUDA_out_mm_out));
1687m.impl("_sparse_sparse_matmul",
1688TORCH_FN(wrapper_SparseCUDA___sparse_sparse_matmul));
1689m.impl("mul.Tensor",
1690TORCH_FN(wrapper_SparseCUDA_Tensor_mul));
1691m.impl("mul.out",
1692TORCH_FN(wrapper_SparseCUDA_out_mul_out));
1693m.impl("mul_.Tensor",
1694TORCH_FN(wrapper_SparseCUDA_Tensor_mul_));
1695m.impl("mv",
1696TORCH_FN(wrapper_SparseCUDA__mv));
1697m.impl("narrow_copy",
1698TORCH_FN(wrapper_SparseCUDA__narrow_copy));
1699m.impl("permute",
1700TORCH_FN(wrapper_SparseCUDA__permute));
1701m.impl("rad2deg",
1702TORCH_FN(wrapper_SparseCUDA__rad2deg));
1703m.impl("rad2deg.out",
1704TORCH_FN(wrapper_SparseCUDA_out_rad2deg_out));
1705m.impl("rad2deg_",
1706TORCH_FN(wrapper_SparseCUDA__rad2deg_));
1707m.impl("deg2rad",
1708TORCH_FN(wrapper_SparseCUDA__deg2rad));
1709m.impl("deg2rad.out",
1710TORCH_FN(wrapper_SparseCUDA_out_deg2rad_out));
1711m.impl("deg2rad_",
1712TORCH_FN(wrapper_SparseCUDA__deg2rad_));
1713m.impl("neg",
1714TORCH_FN(wrapper_SparseCUDA__neg));
1715m.impl("neg.out",
1716TORCH_FN(wrapper_SparseCUDA_out_neg_out));
1717m.impl("neg_",
1718TORCH_FN(wrapper_SparseCUDA__neg_));
1719m.impl("round",
1720TORCH_FN(wrapper_SparseCUDA__round));
1721m.impl("round.out",
1722TORCH_FN(wrapper_SparseCUDA_out_round_out));
1723m.impl("round_",
1724TORCH_FN(wrapper_SparseCUDA__round_));
1725m.impl("relu",
1726TORCH_FN(wrapper_SparseCUDA__relu));
1727m.impl("relu_",
1728TORCH_FN(wrapper_SparseCUDA__relu_));
1729m.impl("sin",
1730TORCH_FN(wrapper_SparseCUDA__sin));
1731m.impl("sin.out",
1732TORCH_FN(wrapper_SparseCUDA_out_sin_out));
1733m.impl("sin_",
1734TORCH_FN(wrapper_SparseCUDA__sin_));
1735m.impl("sinh",
1736TORCH_FN(wrapper_SparseCUDA__sinh));
1737m.impl("sinh.out",
1738TORCH_FN(wrapper_SparseCUDA_out_sinh_out));
1739m.impl("sinh_",
1740TORCH_FN(wrapper_SparseCUDA__sinh_));
1741m.impl("sspaddmm.out",
1742TORCH_FN(wrapper_SparseCUDA_out_sspaddmm_out));
1743m.impl("sum",
1744TORCH_FN(wrapper_SparseCUDA__sum));
1745m.impl("sum.dim_IntList",
1746TORCH_FN(wrapper_SparseCUDA_dim_IntList_sum));
1747m.impl("sqrt",
1748TORCH_FN(wrapper_SparseCUDA__sqrt));
1749m.impl("sqrt.out",
1750TORCH_FN(wrapper_SparseCUDA_out_sqrt_out));
1751m.impl("sqrt_",
1752TORCH_FN(wrapper_SparseCUDA__sqrt_));
1753m.impl("tan",
1754TORCH_FN(wrapper_SparseCUDA__tan));
1755m.impl("tan.out",
1756TORCH_FN(wrapper_SparseCUDA_out_tan_out));
1757m.impl("tan_",
1758TORCH_FN(wrapper_SparseCUDA__tan_));
1759m.impl("tanh",
1760TORCH_FN(wrapper_SparseCUDA__tanh));
1761m.impl("tanh.out",
1762TORCH_FN(wrapper_SparseCUDA_out_tanh_out));
1763m.impl("tanh_",
1764TORCH_FN(wrapper_SparseCUDA__tanh_));
1765m.impl("threshold_backward",
1766TORCH_FN(wrapper_SparseCUDA__threshold_backward));
1767m.impl("threshold_backward.grad_input",
1768TORCH_FN(wrapper_SparseCUDA_grad_input_threshold_backward_out));
1769m.impl("trunc",
1770TORCH_FN(wrapper_SparseCUDA__trunc));
1771m.impl("trunc.out",
1772TORCH_FN(wrapper_SparseCUDA_out_trunc_out));
1773m.impl("trunc_",
1774TORCH_FN(wrapper_SparseCUDA__trunc_));
1775m.impl("unsqueeze",
1776TORCH_FN(wrapper_SparseCUDA__unsqueeze));
1777m.impl("zeros.out",
1778TORCH_FN(wrapper_SparseCUDA_out_zeros_out));
1779m.impl("native_norm",
1780TORCH_FN(wrapper_SparseCUDA__native_norm));
1781m.impl("native_norm.ScalarOpt_dim_dtype",
1782TORCH_FN(wrapper_SparseCUDA_ScalarOpt_dim_dtype_native_norm));
1783m.impl("_sparse_sum_backward",
1784TORCH_FN(wrapper_SparseCUDA___sparse_sum_backward));
1785m.impl("_sparse_softmax",
1786TORCH_FN(wrapper_SparseCUDA___sparse_softmax));
1787m.impl("_sparse_softmax_backward_data",
1788TORCH_FN(wrapper_SparseCUDA___sparse_softmax_backward_data));
1789m.impl("_sparse_log_softmax",
1790TORCH_FN(wrapper_SparseCUDA___sparse_log_softmax));
1791m.impl("_sparse_log_softmax_backward_data",
1792TORCH_FN(wrapper_SparseCUDA___sparse_log_softmax_backward_data));
1793m.impl("norm.ScalarOpt_dim_dtype",
1794TORCH_FN(wrapper_SparseCUDA_ScalarOpt_dim_dtype_norm));
1795m.impl("norm.ScalarOpt_dim",
1796TORCH_FN(wrapper_SparseCUDA_ScalarOpt_dim_norm));
1797m.impl("clone",
1798TORCH_FN(wrapper_SparseCUDA__clone));
1799m.impl("resize_as_sparse_",
1800TORCH_FN(wrapper_SparseCUDA__resize_as_sparse_));
1801m.impl("zero_",
1802TORCH_FN(wrapper_SparseCUDA__zero_));
1803m.impl("sub.Tensor",
1804TORCH_FN(wrapper_SparseCUDA_Tensor_sub));
1805m.impl("sub.out",
1806TORCH_FN(wrapper_SparseCUDA_out_sub_out));
1807m.impl("sub_.Tensor",
1808TORCH_FN(wrapper_SparseCUDA_Tensor_sub_));
1809m.impl("addmm",
1810TORCH_FN(wrapper_SparseCUDA__addmm));
1811m.impl("addmm.out",
1812TORCH_FN(wrapper_SparseCUDA_out_addmm_out));
1813m.impl("addmm_",
1814TORCH_FN(wrapper_SparseCUDA__addmm_));
1815m.impl("_sparse_coo_tensor_with_dims",
1816TORCH_FN(wrapper_SparseCUDA___sparse_coo_tensor_with_dims));
1817m.impl("_sparse_coo_tensor_with_dims_and_tensors",
1818TORCH_FN(wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors));
1819m.impl("sparse_resize_",
1820TORCH_FN(wrapper_SparseCUDA__sparse_resize_));
1821m.impl("sparse_resize_and_clear_",
1822TORCH_FN(wrapper_SparseCUDA__sparse_resize_and_clear_));
1823m.impl("sparse_mask",
1824TORCH_FN(wrapper_SparseCUDA__sparse_mask));
1825m.impl("_to_dense",
1826TORCH_FN(wrapper_SparseCUDA___to_dense));
1827m.impl("sparse_dim",
1828TORCH_FN(wrapper_SparseCUDA__sparse_dim));
1829m.impl("_dimI",
1830TORCH_FN(wrapper_SparseCUDA___dimI));
1831m.impl("dense_dim",
1832TORCH_FN(wrapper_SparseCUDA__dense_dim));
1833m.impl("_dimV",
1834TORCH_FN(wrapper_SparseCUDA___dimV));
1835m.impl("_nnz",
1836TORCH_FN(wrapper_SparseCUDA___nnz));
1837m.impl("_coalesce",
1838TORCH_FN(wrapper_SparseCUDA___coalesce));
1839m.impl("is_coalesced",
1840TORCH_FN(wrapper_SparseCUDA__is_coalesced));
1841m.impl("_indices",
1842TORCH_FN(wrapper_SparseCUDA___indices));
1843m.impl("_values",
1844TORCH_FN(wrapper_SparseCUDA___values));
1845m.impl("_coalesced_",
1846TORCH_FN(wrapper_SparseCUDA___coalesced_));
1847m.impl("indices",
1848TORCH_FN(wrapper_SparseCUDA__indices));
1849m.impl("values",
1850TORCH_FN(wrapper_SparseCUDA__values));
1851m.impl("hspmm",
1852TORCH_FN(wrapper_SparseCUDA__hspmm));
1853m.impl("hspmm.out",
1854TORCH_FN(wrapper_SparseCUDA_out_hspmm_out));
1855m.impl("copy_sparse_to_sparse_",
1856TORCH_FN(wrapper_SparseCUDA__copy_sparse_to_sparse_));
1857m.impl("to_sparse.sparse_dim",
1858TORCH_FN(wrapper_SparseCUDA_sparse_dim_to_sparse));
1859m.impl("to_sparse",
1860TORCH_FN(wrapper_SparseCUDA__to_sparse));
1861m.impl("to_sparse_csr",
1862TORCH_FN(wrapper_SparseCUDA__to_sparse_csr));
1863m.impl("to_sparse_csc",
1864TORCH_FN(wrapper_SparseCUDA__to_sparse_csc));
1865m.impl("to_sparse_bsr",
1866TORCH_FN(wrapper_SparseCUDA__to_sparse_bsr));
1867m.impl("to_sparse_bsc",
1868TORCH_FN(wrapper_SparseCUDA__to_sparse_bsc));
1869m.impl("index_select",
1870TORCH_FN(wrapper_SparseCUDA__index_select));
1871m.impl("erfinv",
1872TORCH_FN(wrapper_SparseCUDA__erfinv));
1873m.impl("erfinv.out",
1874TORCH_FN(wrapper_SparseCUDA_out_erfinv_out));
1875m.impl("erfinv_",
1876TORCH_FN(wrapper_SparseCUDA__erfinv_));
1877m.impl("sign",
1878TORCH_FN(wrapper_SparseCUDA__sign));
1879m.impl("sign.out",
1880TORCH_FN(wrapper_SparseCUDA_out_sign_out));
1881m.impl("sign_",
1882TORCH_FN(wrapper_SparseCUDA__sign_));
1883m.impl("signbit",
1884TORCH_FN(wrapper_SparseCUDA__signbit));
1885m.impl("signbit.out",
1886TORCH_FN(wrapper_SparseCUDA_out_signbit_out));
1887m.impl("any",
1888TORCH_FN(wrapper_SparseCUDA__any));
1889m.impl("pow.Tensor_Scalar",
1890TORCH_FN(wrapper_SparseCUDA_Tensor_Scalar_pow));
1891m.impl("pow.Tensor_Scalar_out",
1892TORCH_FN(wrapper_SparseCUDA_Tensor_Scalar_out_pow_out));
1893m.impl("isinf",
1894TORCH_FN(wrapper_SparseCUDA__isinf));
1895m.impl("isposinf",
1896TORCH_FN(wrapper_SparseCUDA__isposinf));
1897m.impl("isposinf.out",
1898TORCH_FN(wrapper_SparseCUDA_out_isposinf_out));
1899m.impl("isneginf",
1900TORCH_FN(wrapper_SparseCUDA__isneginf));
1901m.impl("isneginf.out",
1902TORCH_FN(wrapper_SparseCUDA_out_isneginf_out));
1903};
1904} // anonymous namespace
1905namespace sparsecuda {
1906at::Tensor abs(const at::Tensor & self) {
1907return wrapper_SparseCUDA__abs(self);
1908}
1909at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
1910return wrapper_SparseCUDA_out_abs_out(self, out);
1911}
1912at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
1913return wrapper_SparseCUDA_out_abs_out(self, out);
1914}
1915at::Tensor & abs_(at::Tensor & self) {
1916return wrapper_SparseCUDA__abs_(self);
1917}
1918at::Tensor sgn(const at::Tensor & self) {
1919return wrapper_SparseCUDA__sgn(self);
1920}
1921at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) {
1922return wrapper_SparseCUDA_out_sgn_out(self, out);
1923}
1924at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) {
1925return wrapper_SparseCUDA_out_sgn_out(self, out);
1926}
1927at::Tensor & sgn_(at::Tensor & self) {
1928return wrapper_SparseCUDA__sgn_(self);
1929}
1930at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
1931return wrapper_SparseCUDA_out_conj_physical_out(self, out);
1932}
1933at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
1934return wrapper_SparseCUDA_out_conj_physical_out(self, out);
1935}
1936at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1937return wrapper_SparseCUDA_Tensor_add(self, other, alpha);
1938}
1939at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1940return wrapper_SparseCUDA_out_add_out(self, other, alpha, out);
1941}
1942at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
1943return wrapper_SparseCUDA_out_add_out(self, other, alpha, out);
1944}
1945at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1946return wrapper_SparseCUDA_Tensor_add_(self, other, alpha);
1947}
1948at::Tensor asinh(const at::Tensor & self) {
1949return wrapper_SparseCUDA__asinh(self);
1950}
1951at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) {
1952return wrapper_SparseCUDA_out_asinh_out(self, out);
1953}
1954at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
1955return wrapper_SparseCUDA_out_asinh_out(self, out);
1956}
1957at::Tensor & asinh_(at::Tensor & self) {
1958return wrapper_SparseCUDA__asinh_(self);
1959}
1960at::Tensor atanh(const at::Tensor & self) {
1961return wrapper_SparseCUDA__atanh(self);
1962}
1963at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
1964return wrapper_SparseCUDA_out_atanh_out(self, out);
1965}
1966at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
1967return wrapper_SparseCUDA_out_atanh_out(self, out);
1968}
1969at::Tensor & atanh_(at::Tensor & self) {
1970return wrapper_SparseCUDA__atanh_(self);
1971}
1972at::Tensor asin(const at::Tensor & self) {
1973return wrapper_SparseCUDA__asin(self);
1974}
1975at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
1976return wrapper_SparseCUDA_out_asin_out(self, out);
1977}
1978at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
1979return wrapper_SparseCUDA_out_asin_out(self, out);
1980}
1981at::Tensor & asin_(at::Tensor & self) {
1982return wrapper_SparseCUDA__asin_(self);
1983}
1984at::Tensor atan(const at::Tensor & self) {
1985return wrapper_SparseCUDA__atan(self);
1986}
1987at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
1988return wrapper_SparseCUDA_out_atan_out(self, out);
1989}
1990at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
1991return wrapper_SparseCUDA_out_atan_out(self, out);
1992}
1993at::Tensor & atan_(at::Tensor & self) {
1994return wrapper_SparseCUDA__atan_(self);
1995}
1996at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) {
1997return wrapper_SparseCUDA__bmm(self, mat2);
1998}
1999at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
2000return wrapper_SparseCUDA_out_bmm_out(self, mat2, out);
2001}
2002at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
2003return wrapper_SparseCUDA_out_bmm_out(self, mat2, out);
2004}
2005at::Tensor _sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
2006return wrapper_SparseCUDA___sparse_broadcast_to(self, size);
2007}
2008at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim) {
2009return wrapper_SparseCUDA__cat(tensors, dim);
2010}
2011at::Tensor ceil(const at::Tensor & self) {
2012return wrapper_SparseCUDA__ceil(self);
2013}
2014at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) {
2015return wrapper_SparseCUDA_out_ceil_out(self, out);
2016}
2017at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) {
2018return wrapper_SparseCUDA_out_ceil_out(self, out);
2019}
2020at::Tensor & ceil_(at::Tensor & self) {
2021return wrapper_SparseCUDA__ceil_(self);
2022}
2023at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
2024return wrapper_SparseCUDA__copy_(self, src, non_blocking);
2025}
2026at::Tensor div(const at::Tensor & self, const at::Tensor & other) {
2027return wrapper_SparseCUDA_Tensor_div(self, other);
2028}
2029at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
2030return wrapper_SparseCUDA_out_div_out(self, other, out);
2031}
2032at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2033return wrapper_SparseCUDA_out_div_out(self, other, out);
2034}
2035at::Tensor & div_(at::Tensor & self, const at::Tensor & other) {
2036return wrapper_SparseCUDA_Tensor_div_(self, other);
2037}
2038at::Tensor div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
2039return wrapper_SparseCUDA_Tensor_mode_div(self, other, rounding_mode);
2040}
2041at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
2042return wrapper_SparseCUDA_out_mode_div_out(self, other, rounding_mode, out);
2043}
2044at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
2045return wrapper_SparseCUDA_out_mode_div_out(self, other, rounding_mode, out);
2046}
2047at::Tensor & div_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
2048return wrapper_SparseCUDA_Tensor_mode_div_(self, other, rounding_mode);
2049}
2050at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
2051return wrapper_SparseCUDA_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2052}
2053at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2054return wrapper_SparseCUDA_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
2055}
2056at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
2057return wrapper_SparseCUDA_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2058}
2059at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2060return wrapper_SparseCUDA_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
2061}
2062at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
2063return wrapper_SparseCUDA__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2064}
2065at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2066return wrapper_SparseCUDA__empty_like(self, dtype, layout, device, pin_memory, memory_format);
2067}
2068at::Tensor erf(const at::Tensor & self) {
2069return wrapper_SparseCUDA__erf(self);
2070}
2071at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) {
2072return wrapper_SparseCUDA_out_erf_out(self, out);
2073}
2074at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) {
2075return wrapper_SparseCUDA_out_erf_out(self, out);
2076}
2077at::Tensor & erf_(at::Tensor & self) {
2078return wrapper_SparseCUDA__erf_(self);
2079}
2080at::Tensor expm1(const at::Tensor & self) {
2081return wrapper_SparseCUDA__expm1(self);
2082}
2083at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
2084return wrapper_SparseCUDA_out_expm1_out(self, out);
2085}
2086at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
2087return wrapper_SparseCUDA_out_expm1_out(self, out);
2088}
2089at::Tensor & expm1_(at::Tensor & self) {
2090return wrapper_SparseCUDA__expm1_(self);
2091}
2092at::Tensor floor(const at::Tensor & self) {
2093return wrapper_SparseCUDA__floor(self);
2094}
2095at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
2096return wrapper_SparseCUDA_out_floor_out(self, out);
2097}
2098at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
2099return wrapper_SparseCUDA_out_floor_out(self, out);
2100}
2101at::Tensor & floor_(at::Tensor & self) {
2102return wrapper_SparseCUDA__floor_(self);
2103}
2104at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other) {
2105return wrapper_SparseCUDA__floor_divide(self, other);
2106}
2107at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
2108return wrapper_SparseCUDA_out_floor_divide_out(self, other, out);
2109}
2110at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2111return wrapper_SparseCUDA_out_floor_divide_out(self, other, out);
2112}
2113at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other) {
2114return wrapper_SparseCUDA_Tensor_floor_divide_(self, other);
2115}
2116at::Tensor frac(const at::Tensor & self) {
2117return wrapper_SparseCUDA__frac(self);
2118}
2119at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
2120return wrapper_SparseCUDA_out_frac_out(self, out);
2121}
2122at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
2123return wrapper_SparseCUDA_out_frac_out(self, out);
2124}
2125at::Tensor & frac_(at::Tensor & self) {
2126return wrapper_SparseCUDA__frac_(self);
2127}
2128at::Tensor isnan(const at::Tensor & self) {
2129return wrapper_SparseCUDA__isnan(self);
2130}
2131at::Tensor nan_to_num(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
2132return wrapper_SparseCUDA__nan_to_num(self, nan, posinf, neginf);
2133}
2134at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
2135return wrapper_SparseCUDA_out_nan_to_num_out(self, nan, posinf, neginf, out);
2136}
2137at::Tensor & nan_to_num_outf(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
2138return wrapper_SparseCUDA_out_nan_to_num_out(self, nan, posinf, neginf, out);
2139}
2140at::Tensor & nan_to_num_(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
2141return wrapper_SparseCUDA__nan_to_num_(self, nan, posinf, neginf);
2142}
2143at::Tensor log1p(const at::Tensor & self) {
2144return wrapper_SparseCUDA__log1p(self);
2145}
2146at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
2147return wrapper_SparseCUDA_out_log1p_out(self, out);
2148}
2149at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
2150return wrapper_SparseCUDA_out_log1p_out(self, out);
2151}
2152at::Tensor & log1p_(at::Tensor & self) {
2153return wrapper_SparseCUDA__log1p_(self);
2154}
2155at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
2156return wrapper_SparseCUDA__mm(self, mat2);
2157}
2158at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
2159return wrapper_SparseCUDA_out_mm_out(self, mat2, out);
2160}
2161at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
2162return wrapper_SparseCUDA_out_mm_out(self, mat2, out);
2163}
2164at::Tensor _sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) {
2165return wrapper_SparseCUDA___sparse_sparse_matmul(self, other);
2166}
2167at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
2168return wrapper_SparseCUDA_Tensor_mul(self, other);
2169}
2170at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
2171return wrapper_SparseCUDA_out_mul_out(self, other, out);
2172}
2173at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2174return wrapper_SparseCUDA_out_mul_out(self, other, out);
2175}
2176at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
2177return wrapper_SparseCUDA_Tensor_mul_(self, other);
2178}
2179at::Tensor mv(const at::Tensor & self, const at::Tensor & vec) {
2180return wrapper_SparseCUDA__mv(self, vec);
2181}
2182at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
2183return wrapper_SparseCUDA__narrow_copy(self, dim, start, length);
2184}
2185at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
2186return wrapper_SparseCUDA__narrow_copy(self, dim, start, length);
2187}
2188at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims) {
2189return wrapper_SparseCUDA__permute(self, dims);
2190}
2191at::Tensor rad2deg(const at::Tensor & self) {
2192return wrapper_SparseCUDA__rad2deg(self);
2193}
2194at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
2195return wrapper_SparseCUDA_out_rad2deg_out(self, out);
2196}
2197at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
2198return wrapper_SparseCUDA_out_rad2deg_out(self, out);
2199}
2200at::Tensor & rad2deg_(at::Tensor & self) {
2201return wrapper_SparseCUDA__rad2deg_(self);
2202}
2203at::Tensor deg2rad(const at::Tensor & self) {
2204return wrapper_SparseCUDA__deg2rad(self);
2205}
2206at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
2207return wrapper_SparseCUDA_out_deg2rad_out(self, out);
2208}
2209at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
2210return wrapper_SparseCUDA_out_deg2rad_out(self, out);
2211}
2212at::Tensor & deg2rad_(at::Tensor & self) {
2213return wrapper_SparseCUDA__deg2rad_(self);
2214}
2215at::Tensor neg(const at::Tensor & self) {
2216return wrapper_SparseCUDA__neg(self);
2217}
2218at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
2219return wrapper_SparseCUDA_out_neg_out(self, out);
2220}
2221at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
2222return wrapper_SparseCUDA_out_neg_out(self, out);
2223}
2224at::Tensor & neg_(at::Tensor & self) {
2225return wrapper_SparseCUDA__neg_(self);
2226}
2227at::Tensor round(const at::Tensor & self) {
2228return wrapper_SparseCUDA__round(self);
2229}
2230at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
2231return wrapper_SparseCUDA_out_round_out(self, out);
2232}
2233at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
2234return wrapper_SparseCUDA_out_round_out(self, out);
2235}
2236at::Tensor & round_(at::Tensor & self) {
2237return wrapper_SparseCUDA__round_(self);
2238}
2239at::Tensor relu(const at::Tensor & self) {
2240return wrapper_SparseCUDA__relu(self);
2241}
2242at::Tensor & relu_(at::Tensor & self) {
2243return wrapper_SparseCUDA__relu_(self);
2244}
2245at::Tensor sin(const at::Tensor & self) {
2246return wrapper_SparseCUDA__sin(self);
2247}
2248at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
2249return wrapper_SparseCUDA_out_sin_out(self, out);
2250}
2251at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
2252return wrapper_SparseCUDA_out_sin_out(self, out);
2253}
2254at::Tensor & sin_(at::Tensor & self) {
2255return wrapper_SparseCUDA__sin_(self);
2256}
2257at::Tensor sinh(const at::Tensor & self) {
2258return wrapper_SparseCUDA__sinh(self);
2259}
2260at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
2261return wrapper_SparseCUDA_out_sinh_out(self, out);
2262}
2263at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
2264return wrapper_SparseCUDA_out_sinh_out(self, out);
2265}
2266at::Tensor & sinh_(at::Tensor & self) {
2267return wrapper_SparseCUDA__sinh_(self);
2268}
2269at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
2270return wrapper_SparseCUDA_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out);
2271}
2272at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
2273return wrapper_SparseCUDA_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out);
2274}
2275at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
2276return wrapper_SparseCUDA__sum(self, dtype);
2277}
2278at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
2279return wrapper_SparseCUDA_dim_IntList_sum(self, dim, keepdim, dtype);
2280}
2281at::Tensor sqrt(const at::Tensor & self) {
2282return wrapper_SparseCUDA__sqrt(self);
2283}
2284at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
2285return wrapper_SparseCUDA_out_sqrt_out(self, out);
2286}
2287at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
2288return wrapper_SparseCUDA_out_sqrt_out(self, out);
2289}
2290at::Tensor & sqrt_(at::Tensor & self) {
2291return wrapper_SparseCUDA__sqrt_(self);
2292}
2293at::Tensor tan(const at::Tensor & self) {
2294return wrapper_SparseCUDA__tan(self);
2295}
2296at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
2297return wrapper_SparseCUDA_out_tan_out(self, out);
2298}
2299at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
2300return wrapper_SparseCUDA_out_tan_out(self, out);
2301}
2302at::Tensor & tan_(at::Tensor & self) {
2303return wrapper_SparseCUDA__tan_(self);
2304}
2305at::Tensor tanh(const at::Tensor & self) {
2306return wrapper_SparseCUDA__tanh(self);
2307}
2308at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
2309return wrapper_SparseCUDA_out_tanh_out(self, out);
2310}
2311at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
2312return wrapper_SparseCUDA_out_tanh_out(self, out);
2313}
2314at::Tensor & tanh_(at::Tensor & self) {
2315return wrapper_SparseCUDA__tanh_(self);
2316}
2317at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
2318return wrapper_SparseCUDA__threshold_backward(grad_output, self, threshold);
2319}
2320at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
2321return wrapper_SparseCUDA_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
2322}
2323at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
2324return wrapper_SparseCUDA_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
2325}
2326at::Tensor trunc(const at::Tensor & self) {
2327return wrapper_SparseCUDA__trunc(self);
2328}
2329at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
2330return wrapper_SparseCUDA_out_trunc_out(self, out);
2331}
2332at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
2333return wrapper_SparseCUDA_out_trunc_out(self, out);
2334}
2335at::Tensor & trunc_(at::Tensor & self) {
2336return wrapper_SparseCUDA__trunc_(self);
2337}
2338at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) {
2339return wrapper_SparseCUDA__unsqueeze(self, dim);
2340}
2341at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) {
2342return wrapper_SparseCUDA_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
2343}
2344at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) {
2345return wrapper_SparseCUDA_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
2346}
2347at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
2348return wrapper_SparseCUDA_out_zeros_out(size, out);
2349}
2350at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
2351return wrapper_SparseCUDA_out_zeros_out(size, out);
2352}
2353at::Tensor native_norm(const at::Tensor & self, const at::Scalar & p) {
2354return wrapper_SparseCUDA__native_norm(self, p);
2355}
2356at::Tensor native_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
2357return wrapper_SparseCUDA_ScalarOpt_dim_dtype_native_norm(self, p, dim, keepdim, dtype);
2358}
2359at::Tensor _sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
2360return wrapper_SparseCUDA___sparse_sum_backward(grad, self, dim);
2361}
2362at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
2363return wrapper_SparseCUDA___sparse_softmax(self, dim, half_to_float);
2364}
2365at::Tensor _sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
2366return wrapper_SparseCUDA___sparse_softmax_backward_data(grad_output, output, dim, self);
2367}
2368at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
2369return wrapper_SparseCUDA___sparse_log_softmax(self, dim, half_to_float);
2370}
2371at::Tensor _sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
2372return wrapper_SparseCUDA___sparse_log_softmax_backward_data(grad_output, output, dim, self);
2373}
2374at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
2375return wrapper_SparseCUDA_ScalarOpt_dim_dtype_norm(self, p, dim, keepdim, dtype);
2376}
2377at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
2378return wrapper_SparseCUDA_ScalarOpt_dim_norm(self, p, dim, keepdim);
2379}
2380at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
2381return wrapper_SparseCUDA__clone(self, memory_format);
2382}
2383const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
2384return wrapper_SparseCUDA__resize_as_sparse_(self, the_template);
2385}
2386at::Tensor & zero_(at::Tensor & self) {
2387return wrapper_SparseCUDA__zero_(self);
2388}
2389at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
2390return wrapper_SparseCUDA_Tensor_sub(self, other, alpha);
2391}
2392at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
2393return wrapper_SparseCUDA_out_sub_out(self, other, alpha, out);
2394}
2395at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
2396return wrapper_SparseCUDA_out_sub_out(self, other, alpha, out);
2397}
2398at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
2399return wrapper_SparseCUDA_Tensor_sub_(self, other, alpha);
2400}
2401at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
2402return wrapper_SparseCUDA__addmm(self, mat1, mat2, beta, alpha);
2403}
2404at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
2405return wrapper_SparseCUDA_out_addmm_out(self, mat1, mat2, beta, alpha, out);
2406}
2407at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
2408return wrapper_SparseCUDA_out_addmm_out(self, mat1, mat2, beta, alpha, out);
2409}
2410at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
2411return wrapper_SparseCUDA__addmm_(self, mat1, mat2, beta, alpha);
2412}
2413at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
2414return wrapper_SparseCUDA___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2415}
2416at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2417return wrapper_SparseCUDA___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
2418}
2419at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
2420return wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2421}
2422at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2423return wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory);
2424}
2425at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
2426return wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2427}
2428at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2429return wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
2430}
2431const at::Tensor & sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
2432return wrapper_SparseCUDA__sparse_resize_(self, size, sparse_dim, dense_dim);
2433}
2434const at::Tensor & sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
2435return wrapper_SparseCUDA__sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
2436}
2437at::Tensor sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
2438return wrapper_SparseCUDA__sparse_mask(self, mask);
2439}
2440at::Tensor _to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
2441return wrapper_SparseCUDA___to_dense(self, dtype);
2442}
2443int64_t sparse_dim(const at::Tensor & self) {
2444return wrapper_SparseCUDA__sparse_dim(self);
2445}
2446int64_t _dimI(const at::Tensor & self) {
2447return wrapper_SparseCUDA___dimI(self);
2448}
2449int64_t dense_dim(const at::Tensor & self) {
2450return wrapper_SparseCUDA__dense_dim(self);
2451}
2452int64_t _dimV(const at::Tensor & self) {
2453return wrapper_SparseCUDA___dimV(self);
2454}
2455int64_t _nnz(const at::Tensor & self) {
2456return wrapper_SparseCUDA___nnz(self);
2457}
2458at::Tensor _coalesce(const at::Tensor & self) {
2459return wrapper_SparseCUDA___coalesce(self);
2460}
2461bool is_coalesced(const at::Tensor & self) {
2462return wrapper_SparseCUDA__is_coalesced(self);
2463}
2464at::Tensor _indices(const at::Tensor & self) {
2465return wrapper_SparseCUDA___indices(self);
2466}
2467at::Tensor _values(const at::Tensor & self) {
2468return wrapper_SparseCUDA___values(self);
2469}
2470at::Tensor & _coalesced_(at::Tensor & self, bool coalesced) {
2471return wrapper_SparseCUDA___coalesced_(self, coalesced);
2472}
2473at::Tensor indices(const at::Tensor & self) {
2474return wrapper_SparseCUDA__indices(self);
2475}
2476at::Tensor values(const at::Tensor & self) {
2477return wrapper_SparseCUDA__values(self);
2478}
2479at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) {
2480return wrapper_SparseCUDA__hspmm(mat1, mat2);
2481}
2482at::Tensor & hspmm_out(at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) {
2483return wrapper_SparseCUDA_out_hspmm_out(mat1, mat2, out);
2484}
2485at::Tensor & hspmm_outf(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
2486return wrapper_SparseCUDA_out_hspmm_out(mat1, mat2, out);
2487}
2488at::Tensor & copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
2489return wrapper_SparseCUDA__copy_sparse_to_sparse_(self, src, non_blocking);
2490}
2491at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) {
2492return wrapper_SparseCUDA_sparse_dim_to_sparse(self, sparse_dim);
2493}
2494at::Tensor to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
2495return wrapper_SparseCUDA__to_sparse(self, layout, blocksize, dense_dim);
2496}
2497at::Tensor to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
2498return wrapper_SparseCUDA__to_sparse_csr(self, dense_dim);
2499}
2500at::Tensor to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
2501return wrapper_SparseCUDA__to_sparse_csc(self, dense_dim);
2502}
2503at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
2504return wrapper_SparseCUDA__to_sparse_bsr(self, blocksize, dense_dim);
2505}
2506at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
2507return wrapper_SparseCUDA__to_sparse_bsc(self, blocksize, dense_dim);
2508}
2509at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
2510return wrapper_SparseCUDA__index_select(self, dim, index);
2511}
2512at::Tensor erfinv(const at::Tensor & self) {
2513return wrapper_SparseCUDA__erfinv(self);
2514}
2515at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) {
2516return wrapper_SparseCUDA_out_erfinv_out(self, out);
2517}
2518at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) {
2519return wrapper_SparseCUDA_out_erfinv_out(self, out);
2520}
2521at::Tensor & erfinv_(at::Tensor & self) {
2522return wrapper_SparseCUDA__erfinv_(self);
2523}
2524at::Tensor sign(const at::Tensor & self) {
2525return wrapper_SparseCUDA__sign(self);
2526}
2527at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
2528return wrapper_SparseCUDA_out_sign_out(self, out);
2529}
2530at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
2531return wrapper_SparseCUDA_out_sign_out(self, out);
2532}
2533at::Tensor & sign_(at::Tensor & self) {
2534return wrapper_SparseCUDA__sign_(self);
2535}
2536at::Tensor signbit(const at::Tensor & self) {
2537return wrapper_SparseCUDA__signbit(self);
2538}
2539at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) {
2540return wrapper_SparseCUDA_out_signbit_out(self, out);
2541}
2542at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) {
2543return wrapper_SparseCUDA_out_signbit_out(self, out);
2544}
2545at::Tensor any(const at::Tensor & self) {
2546return wrapper_SparseCUDA__any(self);
2547}
2548at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) {
2549return wrapper_SparseCUDA_Tensor_Scalar_pow(self, exponent);
2550}
2551at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
2552return wrapper_SparseCUDA_Tensor_Scalar_out_pow_out(self, exponent, out);
2553}
2554at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
2555return wrapper_SparseCUDA_Tensor_Scalar_out_pow_out(self, exponent, out);
2556}
2557at::Tensor isinf(const at::Tensor & self) {
2558return wrapper_SparseCUDA__isinf(self);
2559}
2560at::Tensor isposinf(const at::Tensor & self) {
2561return wrapper_SparseCUDA__isposinf(self);
2562}
2563at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
2564return wrapper_SparseCUDA_out_isposinf_out(self, out);
2565}
2566at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
2567return wrapper_SparseCUDA_out_isposinf_out(self, out);
2568}
2569at::Tensor isneginf(const at::Tensor & self) {
2570return wrapper_SparseCUDA__isneginf(self);
2571}
2572at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
2573return wrapper_SparseCUDA_out_isneginf_out(self, out);
2574}
2575at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
2576return wrapper_SparseCUDA_out_isneginf_out(self, out);
2577}
2578} // namespace sparsecuda
2579} // namespace at
2580