// required for old g++ to compile PRId64 macros, see
// https://github.com/pytorch/pytorch/issues/3571
// for context
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

// An external backend might generate this file within its own code tree
// and run clang-format on all source files in that tree, so clang-format
// is disabled here since the backend might use a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries; it
// just excludes external projects such as torch_xla which
// re-use some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
    defined(TORCH_HIP_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif
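
// Illustrative note (not emitted by torchgen): when TORCH_ASSERT_ONLY_METHOD_OPERATORS
// is defined, aggregate headers such as <ATen/Functions.h> are expected to fail with a
// compile-time error, which is what forces this translation unit to rely only on the
// per-operator <ATen/ops/*.h> headers included below and keeps incremental rebuilds of
// generated registration code cheap.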

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/Optional.h>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>

#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/_conj_physical_native.h>
#include <ATen/ops/_nnz_native.h>
#include <ATen/ops/_sparse_csr_prod_native.h>
#include <ATen/ops/_sparse_csr_sum_native.h>
#include <ATen/ops/_to_dense_native.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/ccol_indices_native.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/clone_native.h>
#include <ATen/ops/col_indices_native.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/crow_indices_native.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/dense_dim_native.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/row_indices_native.h>
#include <ATen/ops/select_copy_native.h>
#include <ATen/ops/select_native.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sparse_dim_native.h>
#include <ATen/ops/sparse_mask_native.h>
#include <ATen/ops/sparse_sampled_addmm_native.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/to_sparse_bsc_native.h>
#include <ATen/ops/to_sparse_bsr_native.h>
#include <ATen/ops/to_sparse_csc_native.h>
#include <ATen/ops/to_sparse_csr_native.h>
#include <ATen/ops/to_sparse_native.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/values_native.h>
#include <ATen/ops/zero_native.h>

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
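// For example (illustrative comment only): if a caller passes a pre-allocated `out`
// that already has the requested sizes, resize_output() reports no resize and the
// tensor keeps whatever strides it came with; only a freshly (re)allocated `out` is
// restrided to the advisory strides or to the requested memory format.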
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
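// Typical (hypothetical) use by a structured out=/in-place wrapper, sketched for
// illustration only; the wrappers below are unstructured and call straight into
// at::native instead:
//
//   // in-place variant: validate `self` against the meta function's result
//   //   check_inplace(self, meta_sizes, meta_options);
//   // out variant: resize/restride the user-supplied `out` first
//   //   resize_out(out, meta_sizes, meta_strides, meta_options);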
namespace {
at::Tensor wrapper_SparseCsrCUDA__abs(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::abs_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_abs_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::abs_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__abs_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::abs_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__angle(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::angle_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_angle_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::angle_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__sgn(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__sgn", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sgn_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_sgn_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_sgn_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_sgn_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sgn_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__sgn_(at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__sgn_", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sgn_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA___conj_physical(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA___conj_physical", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::conj_physical_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_conj_physical_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_conj_physical_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::conj_physical_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__conj_physical_(at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__conj_physical_", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::conj_physical_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::add_sparse_csr(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::add_out_sparse_csr_cuda(self, other, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::add_sparse_csr_(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_addmv_out(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_addmv_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_addmv_out", "self");
  c10::impl::check_and_update_common_device(common_device, mat, "wrapper_SparseCsrCUDA_out_addmv_out", "mat");
  c10::impl::check_and_update_common_device(common_device, vec, "wrapper_SparseCsrCUDA_out_addmv_out", "vec");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::addmv_out_sparse_compressed_cuda(self, mat, vec, beta, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__asinh(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__asinh", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::asinh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_asinh_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_asinh_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_asinh_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::asinh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__asinh_(at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__asinh_", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::asinh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__atanh(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__atanh", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::atanh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_atanh_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_atanh_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_atanh_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::atanh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__atanh_(at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__atanh_", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::atanh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__asin(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::asin_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_asin_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::asin_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__asin_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::asin_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__atan(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::atan_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_atan_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::atan_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__atan_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::atan_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_baddbmm_out(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_baddbmm_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_baddbmm_out", "self");
  c10::impl::check_and_update_common_device(common_device, batch1, "wrapper_SparseCsrCUDA_out_baddbmm_out", "batch1");
  c10::impl::check_and_update_common_device(common_device, batch2, "wrapper_SparseCsrCUDA_out_baddbmm_out", "batch2");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::baddbmm_out_sparse_csr_cuda(self, batch1, batch2, beta, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_bmm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_bmm_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_bmm_out", "self");
  c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA_out_bmm_out", "mat2");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::bmm_out_sparse_csr_cuda(self, mat2, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__ceil(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::ceil_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_ceil_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::ceil_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__ceil_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::ceil_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::copy_sparse_compressed_(self, src, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  globalContext().lazyInitCUDA();
  const DeviceGuard device_guard(device_or_default(device));
  return at::native::empty_sparse_compressed(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_SparseCsrCUDA__resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::resize_sparse_csr_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_like_sparse_csr(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__erf(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::erf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_erf_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::erf_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__erf_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::erf_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__expm1(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::expm1_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_expm1_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::expm1_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__expm1_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::expm1_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_Scalar_fill_(at::Tensor & self, const at::Scalar & value) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::fill_sparse_csr_(self, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__floor(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::floor_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_floor_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::floor_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__floor_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::floor_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__frac(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::frac_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_frac_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::frac_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__frac_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::frac_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__isnan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::isnan_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__log1p(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::log1p_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_log1p_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::log1p_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__log1p_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::log1p_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__mm(const at::Tensor & self, const at::Tensor & mat2) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__mm", "self");
  c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA__mm", "mat2");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::_sparse_csr_mm(self, mat2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_mm_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_mm_out", "self");
  c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA_out_mm_out", "mat2");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::_sparse_csr_mm_out(self, mat2, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA_Tensor_mul(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::mul_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::mul_out_sparse_csr(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_Tensor_mul_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::mul_sparse_csr_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA_Scalar_mul(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::mul_scalar_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_Scalar_mul_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::mul__scalar_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__rad2deg(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__rad2deg", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::rad2deg_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_rad2deg_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_rad2deg_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::rad2deg_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__rad2deg_(at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__rad2deg_", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::rad2deg_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__deg2rad(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__deg2rad", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::deg2rad_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_deg2rad_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_deg2rad_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::deg2rad_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__deg2rad_(at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__deg2rad_", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::deg2rad_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__neg(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::neg_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_neg_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::neg_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__neg_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::neg_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__round(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::round_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_round_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::round_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__round_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::round_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__relu(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::relu_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__relu_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::relu_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA_int_select(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  // No device check
  // DeviceGuard omitted
  return at::native::select_sparse_csr(self, dim, index.expect_int());
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__sin(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sin_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_sin_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sin_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__sin_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sin_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__sinh(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sinh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_sinh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sinh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__sinh_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sinh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sum_csr(self, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__sqrt(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sqrt_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_sqrt_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sqrt_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__sqrt_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sqrt_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__tan(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::tan_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_tan_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::tan_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__tan_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::tan_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__tanh(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::tanh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_tanh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::tanh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__tanh_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::tanh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCsrCUDA__threshold_backward", "grad_output");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__threshold_backward", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::threshold_backward_sparse_compressed(grad_output, self, threshold);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, grad_input, "wrapper_SparseCsrCUDA_grad_input_threshold_backward_out", "grad_input");
  c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCsrCUDA_grad_input_threshold_backward_out", "grad_output");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_grad_input_threshold_backward_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::threshold_backward_sparse_compressed_out(grad_output, self, threshold, grad_input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__trunc(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::trunc_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_trunc_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::trunc_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__trunc_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::trunc_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_sum", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::_sparse_csr_sum_cuda(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_prod", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::_sparse_csr_prod_cuda(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__clone", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::clone_sparse_compressed(self, memory_format);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_SparseCsrCUDA__resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__resize_as_sparse_", "self");
  c10::impl::check_and_update_common_device(common_device, the_template, "wrapper_SparseCsrCUDA__resize_as_sparse_", "the_template");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::resize_as_sparse_compressed_(self, the_template);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__zero_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::zero_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__sparse_sampled_addmm", "self");
  c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCsrCUDA__sparse_sampled_addmm", "mat1");
  c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA__sparse_sampled_addmm", "mat2");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sparse_sampled_addmm_sparse_csr_cuda(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out", "self");
  c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out", "mat1");
  c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out", "mat2");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sparse_sampled_addmm_out_sparse_csr_cuda(self, mat1, mat2, beta, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__addmm", "self");
  c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCsrCUDA__addmm", "mat1");
  c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA__addmm", "mat2");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::addmm_sparse_compressed_dense(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_addmm_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_addmm_out", "self");
  c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCsrCUDA_out_addmm_out", "mat1");
  c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA_out_addmm_out", "mat2");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::addmm_out_sparse_compressed_cuda(self, mat1, mat2, beta, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__sparse_mask", "self");
  c10::impl::check_and_update_common_device(common_device, mask, "wrapper_SparseCsrCUDA__sparse_mask", "mask");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sparse_mask_sparse_csr(self, mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA___to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  c10::optional<Device> common_device = nullopt;
  (void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA___to_dense", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sparse_compressed_to_dense(self, dtype);
}
} // anonymous namespace
namespace {
int64_t wrapper_SparseCsrCUDA__sparse_dim(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_dim_sparse_csr(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_SparseCsrCUDA__dense_dim(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_dim_sparse_csr(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_SparseCsrCUDA___nnz(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_nnz_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__values(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::values_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__crow_indices(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::crow_indices_sparse_csr(self);
}
} // anonymous namespace
1060namespace {
1061at::Tensor wrapper_SparseCsrCUDA__col_indices(const at::Tensor & self) {
1062 // No device check
1063 // DeviceGuard omitted
1064 return at::native::col_indices_sparse_csr(self);
1065}
1066} // anonymous namespace
1067namespace {
1068at::Tensor wrapper_SparseCsrCUDA__ccol_indices(const at::Tensor & self) {
1069 // No device check
1070 // DeviceGuard omitted
1071 return at::native::ccol_indices_sparse_csr(self);
1072}
1073} // anonymous namespace
1074namespace {
1075at::Tensor wrapper_SparseCsrCUDA__row_indices(const at::Tensor & self) {
1076 // No device check
1077 // DeviceGuard omitted
1078 return at::native::row_indices_sparse_csr(self);
1079}
1080} // anonymous namespace
1081namespace {
1082at::Tensor wrapper_SparseCsrCUDA_sparse_dim_to_sparse(const at::Tensor & self, int64_t sparse_dim) {
1083 c10::optional<Device> common_device = nullopt;
1084(void)common_device; // Suppress unused variable warning
1085 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_sparse_dim_to_sparse", "self");
1086 const OptionalDeviceGuard device_guard(device_of(self));
1087 return at::native::sparse_compressed_to_sparse(self, sparse_dim);
1088}
1089} // anonymous namespace
1090namespace {
1091at::Tensor wrapper_SparseCsrCUDA__to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1092 c10::optional<Device> common_device = nullopt;
1093(void)common_device; // Suppress unused variable warning
1094 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse", "self");
1095 const OptionalDeviceGuard device_guard(device_of(self));
1096 return at::native::sparse_compressed_to_sparse(self, layout, blocksize, dense_dim);
1097}
1098} // anonymous namespace
1099namespace {
1100at::Tensor wrapper_SparseCsrCUDA__to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
1101 c10::optional<Device> common_device = nullopt;
1102(void)common_device; // Suppress unused variable warning
1103 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse_csr", "self");
1104 const OptionalDeviceGuard device_guard(device_of(self));
1105 return at::native::sparse_compressed_to_sparse_csr(self, dense_dim);
1106}
1107} // anonymous namespace
1108namespace {
1109at::Tensor wrapper_SparseCsrCUDA__to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
1110 c10::optional<Device> common_device = nullopt;
1111(void)common_device; // Suppress unused variable warning
1112 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse_csc", "self");
1113 const OptionalDeviceGuard device_guard(device_of(self));
1114 return at::native::sparse_compressed_to_sparse_csc(self, dense_dim);
1115}
1116} // anonymous namespace
1117namespace {
1118at::Tensor wrapper_SparseCsrCUDA__to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1119 c10::optional<Device> common_device = nullopt;
1120(void)common_device; // Suppress unused variable warning
1121 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse_bsr", "self");
1122 const OptionalDeviceGuard device_guard(device_of(self));
1123 return at::native::sparse_compressed_to_sparse_bsr(self, blocksize, dense_dim);
1124}
1125} // anonymous namespace
1126namespace {
1127at::Tensor wrapper_SparseCsrCUDA__to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1128 c10::optional<Device> common_device = nullopt;
1129(void)common_device; // Suppress unused variable warning
1130 c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse_bsc", "self");
1131 const OptionalDeviceGuard device_guard(device_of(self));
1132 return at::native::sparse_compressed_to_sparse_bsc(self, blocksize, dense_dim);
1133}
1134} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_SparseCsrCUDA_X_triangular_solve_out(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
  c10::optional<Device> common_device = nullopt;
(void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, X, "wrapper_SparseCsrCUDA_X_triangular_solve_out", "X");
  c10::impl::check_and_update_common_device(common_device, M, "wrapper_SparseCsrCUDA_X_triangular_solve_out", "M");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_X_triangular_solve_out", "self");
  c10::impl::check_and_update_common_device(common_device, A, "wrapper_SparseCsrCUDA_X_triangular_solve_out", "A");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::triangular_solve_out_sparse_csr_cuda(self, A, upper, transpose, unitriangular, X, M);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__erfinv(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::erfinv_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_erfinv_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::erfinv_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__erfinv_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::erfinv_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__sign(const at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sign_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_sign_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sign_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__sign_(at::Tensor & self) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::sign_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__signbit(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
(void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__signbit", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::signbit_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_signbit_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
(void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_signbit_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_signbit_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::signbit_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA__normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  // No device check
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::normal_sparse_csr_(self, mean, std, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__isinf(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::isinf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__isposinf(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
(void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__isposinf", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::isposinf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_isposinf_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
(void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_isposinf_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_isposinf_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::isposinf_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA__isneginf(const at::Tensor & self) {
  c10::optional<Device> common_device = nullopt;
(void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__isneginf", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::isneginf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCUDA_out_isneginf_out(const at::Tensor & self, at::Tensor & out) {
  c10::optional<Device> common_device = nullopt;
(void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_isneginf_out", "out");
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_isneginf_out", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::isneginf_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCUDA_int_select_copy(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  c10::optional<Device> common_device = nullopt;
(void)common_device; // Suppress unused variable warning
  c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_int_select_copy", "self");
  const OptionalDeviceGuard device_guard(device_of(self));
  return at::native::select_copy_sparse_csr(self, dim, index.expect_int());
}
} // anonymous namespace
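// Registration: bind each ATen operator overload (by schema name, e.g.
// "sign" or "abs.out") to its wrapper above under the SparseCsrCUDA
// dispatch key. Once this block is loaded, a call like the following
// (illustrative only) is routed by the dispatcher to
// wrapper_SparseCsrCUDA__sign and on to at::native::sign_sparse_csr:
//
//   at::Tensor y = at::sign(x);  // x: CUDA tensor with a sparse CSR layout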
TORCH_LIBRARY_IMPL(aten, SparseCsrCUDA, m) {
    m.impl("abs",
TORCH_FN(wrapper_SparseCsrCUDA__abs));
m.impl("abs.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_abs_out));
m.impl("abs_",
TORCH_FN(wrapper_SparseCsrCUDA__abs_));
m.impl("angle",
TORCH_FN(wrapper_SparseCsrCUDA__angle));
m.impl("angle.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_angle_out));
m.impl("sgn",
TORCH_FN(wrapper_SparseCsrCUDA__sgn));
m.impl("sgn.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_sgn_out));
m.impl("sgn_",
TORCH_FN(wrapper_SparseCsrCUDA__sgn_));
m.impl("_conj_physical",
TORCH_FN(wrapper_SparseCsrCUDA___conj_physical));
m.impl("conj_physical.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_conj_physical_out));
m.impl("conj_physical_",
TORCH_FN(wrapper_SparseCsrCUDA__conj_physical_));
m.impl("add.Tensor",
TORCH_FN(wrapper_SparseCsrCUDA_Tensor_add));
m.impl("add.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_add_out));
m.impl("add_.Tensor",
TORCH_FN(wrapper_SparseCsrCUDA_Tensor_add_));
m.impl("addmv.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_addmv_out));
m.impl("asinh",
TORCH_FN(wrapper_SparseCsrCUDA__asinh));
m.impl("asinh.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_asinh_out));
m.impl("asinh_",
TORCH_FN(wrapper_SparseCsrCUDA__asinh_));
m.impl("atanh",
TORCH_FN(wrapper_SparseCsrCUDA__atanh));
m.impl("atanh.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_atanh_out));
m.impl("atanh_",
TORCH_FN(wrapper_SparseCsrCUDA__atanh_));
m.impl("asin",
TORCH_FN(wrapper_SparseCsrCUDA__asin));
m.impl("asin.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_asin_out));
m.impl("asin_",
TORCH_FN(wrapper_SparseCsrCUDA__asin_));
m.impl("atan",
TORCH_FN(wrapper_SparseCsrCUDA__atan));
m.impl("atan.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_atan_out));
m.impl("atan_",
TORCH_FN(wrapper_SparseCsrCUDA__atan_));
m.impl("baddbmm.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_baddbmm_out));
m.impl("bmm.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_bmm_out));
m.impl("ceil",
TORCH_FN(wrapper_SparseCsrCUDA__ceil));
m.impl("ceil.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_ceil_out));
m.impl("ceil_",
TORCH_FN(wrapper_SparseCsrCUDA__ceil_));
m.impl("copy_",
TORCH_FN(wrapper_SparseCsrCUDA__copy_));
m.impl("empty.memory_format",
TORCH_FN(wrapper_SparseCsrCUDA_memory_format_empty));
m.impl("resize_",
TORCH_FN(wrapper_SparseCsrCUDA__resize_));
m.impl("empty_like",
TORCH_FN(wrapper_SparseCsrCUDA__empty_like));
m.impl("erf",
TORCH_FN(wrapper_SparseCsrCUDA__erf));
m.impl("erf.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_erf_out));
m.impl("erf_",
TORCH_FN(wrapper_SparseCsrCUDA__erf_));
m.impl("expm1",
TORCH_FN(wrapper_SparseCsrCUDA__expm1));
m.impl("expm1.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_expm1_out));
m.impl("expm1_",
TORCH_FN(wrapper_SparseCsrCUDA__expm1_));
m.impl("fill_.Scalar",
TORCH_FN(wrapper_SparseCsrCUDA_Scalar_fill_));
m.impl("floor",
TORCH_FN(wrapper_SparseCsrCUDA__floor));
m.impl("floor.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_floor_out));
m.impl("floor_",
TORCH_FN(wrapper_SparseCsrCUDA__floor_));
m.impl("frac",
TORCH_FN(wrapper_SparseCsrCUDA__frac));
m.impl("frac.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_frac_out));
m.impl("frac_",
TORCH_FN(wrapper_SparseCsrCUDA__frac_));
m.impl("isnan",
TORCH_FN(wrapper_SparseCsrCUDA__isnan));
m.impl("log1p",
TORCH_FN(wrapper_SparseCsrCUDA__log1p));
m.impl("log1p.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_log1p_out));
m.impl("log1p_",
TORCH_FN(wrapper_SparseCsrCUDA__log1p_));
m.impl("mm",
TORCH_FN(wrapper_SparseCsrCUDA__mm));
m.impl("mm.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_mm_out));
m.impl("mul.Tensor",
TORCH_FN(wrapper_SparseCsrCUDA_Tensor_mul));
m.impl("mul.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_mul_out));
m.impl("mul_.Tensor",
TORCH_FN(wrapper_SparseCsrCUDA_Tensor_mul_));
m.impl("mul.Scalar",
TORCH_FN(wrapper_SparseCsrCUDA_Scalar_mul));
m.impl("mul_.Scalar",
TORCH_FN(wrapper_SparseCsrCUDA_Scalar_mul_));
m.impl("rad2deg",
TORCH_FN(wrapper_SparseCsrCUDA__rad2deg));
m.impl("rad2deg.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_rad2deg_out));
m.impl("rad2deg_",
TORCH_FN(wrapper_SparseCsrCUDA__rad2deg_));
m.impl("deg2rad",
TORCH_FN(wrapper_SparseCsrCUDA__deg2rad));
m.impl("deg2rad.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_deg2rad_out));
m.impl("deg2rad_",
TORCH_FN(wrapper_SparseCsrCUDA__deg2rad_));
m.impl("neg",
TORCH_FN(wrapper_SparseCsrCUDA__neg));
m.impl("neg.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_neg_out));
m.impl("neg_",
TORCH_FN(wrapper_SparseCsrCUDA__neg_));
m.impl("round",
TORCH_FN(wrapper_SparseCsrCUDA__round));
m.impl("round.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_round_out));
m.impl("round_",
TORCH_FN(wrapper_SparseCsrCUDA__round_));
m.impl("relu",
TORCH_FN(wrapper_SparseCsrCUDA__relu));
m.impl("relu_",
TORCH_FN(wrapper_SparseCsrCUDA__relu_));
m.impl("select.int",
TORCH_FN(wrapper_SparseCsrCUDA_int_select));
m.impl("sin",
TORCH_FN(wrapper_SparseCsrCUDA__sin));
m.impl("sin.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_sin_out));
m.impl("sin_",
TORCH_FN(wrapper_SparseCsrCUDA__sin_));
m.impl("sinh",
TORCH_FN(wrapper_SparseCsrCUDA__sinh));
m.impl("sinh.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_sinh_out));
m.impl("sinh_",
TORCH_FN(wrapper_SparseCsrCUDA__sinh_));
m.impl("sum",
TORCH_FN(wrapper_SparseCsrCUDA__sum));
m.impl("sqrt",
TORCH_FN(wrapper_SparseCsrCUDA__sqrt));
m.impl("sqrt.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_sqrt_out));
m.impl("sqrt_",
TORCH_FN(wrapper_SparseCsrCUDA__sqrt_));
m.impl("tan",
TORCH_FN(wrapper_SparseCsrCUDA__tan));
m.impl("tan.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_tan_out));
m.impl("tan_",
TORCH_FN(wrapper_SparseCsrCUDA__tan_));
m.impl("tanh",
TORCH_FN(wrapper_SparseCsrCUDA__tanh));
m.impl("tanh.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_tanh_out));
m.impl("tanh_",
TORCH_FN(wrapper_SparseCsrCUDA__tanh_));
m.impl("threshold_backward",
TORCH_FN(wrapper_SparseCsrCUDA__threshold_backward));
m.impl("threshold_backward.grad_input",
TORCH_FN(wrapper_SparseCsrCUDA_grad_input_threshold_backward_out));
m.impl("trunc",
TORCH_FN(wrapper_SparseCsrCUDA__trunc));
m.impl("trunc.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_trunc_out));
m.impl("trunc_",
TORCH_FN(wrapper_SparseCsrCUDA__trunc_));
m.impl("_sparse_csr_sum.dim_dtype",
TORCH_FN(wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_sum));
m.impl("_sparse_csr_prod.dim_dtype",
TORCH_FN(wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_prod));
m.impl("clone",
TORCH_FN(wrapper_SparseCsrCUDA__clone));
m.impl("resize_as_sparse_",
TORCH_FN(wrapper_SparseCsrCUDA__resize_as_sparse_));
m.impl("zero_",
TORCH_FN(wrapper_SparseCsrCUDA__zero_));
m.impl("sparse_sampled_addmm",
TORCH_FN(wrapper_SparseCsrCUDA__sparse_sampled_addmm));
m.impl("sparse_sampled_addmm.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out));
m.impl("addmm",
TORCH_FN(wrapper_SparseCsrCUDA__addmm));
m.impl("addmm.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_addmm_out));
m.impl("sparse_mask",
TORCH_FN(wrapper_SparseCsrCUDA__sparse_mask));
m.impl("_to_dense",
TORCH_FN(wrapper_SparseCsrCUDA___to_dense));
m.impl("sparse_dim",
TORCH_FN(wrapper_SparseCsrCUDA__sparse_dim));
m.impl("dense_dim",
TORCH_FN(wrapper_SparseCsrCUDA__dense_dim));
m.impl("_nnz",
TORCH_FN(wrapper_SparseCsrCUDA___nnz));
m.impl("values",
TORCH_FN(wrapper_SparseCsrCUDA__values));
m.impl("crow_indices",
TORCH_FN(wrapper_SparseCsrCUDA__crow_indices));
m.impl("col_indices",
TORCH_FN(wrapper_SparseCsrCUDA__col_indices));
m.impl("ccol_indices",
TORCH_FN(wrapper_SparseCsrCUDA__ccol_indices));
m.impl("row_indices",
TORCH_FN(wrapper_SparseCsrCUDA__row_indices));
m.impl("to_sparse.sparse_dim",
TORCH_FN(wrapper_SparseCsrCUDA_sparse_dim_to_sparse));
m.impl("to_sparse",
TORCH_FN(wrapper_SparseCsrCUDA__to_sparse));
m.impl("to_sparse_csr",
TORCH_FN(wrapper_SparseCsrCUDA__to_sparse_csr));
m.impl("to_sparse_csc",
TORCH_FN(wrapper_SparseCsrCUDA__to_sparse_csc));
m.impl("to_sparse_bsr",
TORCH_FN(wrapper_SparseCsrCUDA__to_sparse_bsr));
m.impl("to_sparse_bsc",
TORCH_FN(wrapper_SparseCsrCUDA__to_sparse_bsc));
m.impl("triangular_solve.X",
TORCH_FN(wrapper_SparseCsrCUDA_X_triangular_solve_out));
m.impl("erfinv",
TORCH_FN(wrapper_SparseCsrCUDA__erfinv));
m.impl("erfinv.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_erfinv_out));
m.impl("erfinv_",
TORCH_FN(wrapper_SparseCsrCUDA__erfinv_));
m.impl("sign",
TORCH_FN(wrapper_SparseCsrCUDA__sign));
m.impl("sign.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_sign_out));
m.impl("sign_",
TORCH_FN(wrapper_SparseCsrCUDA__sign_));
m.impl("signbit",
TORCH_FN(wrapper_SparseCsrCUDA__signbit));
m.impl("signbit.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_signbit_out));
m.impl("normal_",
TORCH_FN(wrapper_SparseCsrCUDA__normal_));
m.impl("isinf",
TORCH_FN(wrapper_SparseCsrCUDA__isinf));
m.impl("isposinf",
TORCH_FN(wrapper_SparseCsrCUDA__isposinf));
m.impl("isposinf.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_isposinf_out));
m.impl("isneginf",
TORCH_FN(wrapper_SparseCsrCUDA__isneginf));
m.impl("isneginf.out",
TORCH_FN(wrapper_SparseCsrCUDA_out_isneginf_out));
m.impl("select_copy.int",
TORCH_FN(wrapper_SparseCsrCUDA_int_select_copy));
};
} // anonymous namespace
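// The at::sparsecsrcuda namespace re-exposes the registered kernels as plain
// functions that call the wrappers directly, without going through the
// dispatcher. Out-variants follow the usual codegen convention: `*_out(out,
// ...)` takes the result tensor first, `*_outf(..., out)` takes it last, and
// both forward to the same wrapper. For example (illustrative only):
//
//   at::Tensor & r = at::sparsecsrcuda::abs_out(out, self);
//   at::Tensor & s = at::sparsecsrcuda::abs_outf(self, out);  // same wrapper call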
namespace sparsecsrcuda {
at::Tensor abs(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__abs(self);
}
at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_abs_out(self, out);
}
at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_abs_out(self, out);
}
at::Tensor & abs_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__abs_(self);
}
at::Tensor angle(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__angle(self);
}
at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_angle_out(self, out);
}
at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_angle_out(self, out);
}
at::Tensor sgn(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__sgn(self);
}
at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_sgn_out(self, out);
}
at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_sgn_out(self, out);
}
at::Tensor & sgn_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__sgn_(self);
}
at::Tensor _conj_physical(const at::Tensor & self) {
return wrapper_SparseCsrCUDA___conj_physical(self);
}
at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_conj_physical_out(self, out);
}
at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_conj_physical_out(self, out);
}
at::Tensor & conj_physical_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__conj_physical_(self);
}
at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA_Tensor_add(self, other, alpha);
}
at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA_out_add_out(self, other, alpha, out);
}
at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_add_out(self, other, alpha, out);
}
at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA_Tensor_add_(self, other, alpha);
}
at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA_out_addmv_out(self, mat, vec, beta, alpha, out);
}
at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_addmv_out(self, mat, vec, beta, alpha, out);
}
at::Tensor asinh(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__asinh(self);
}
at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_asinh_out(self, out);
}
at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_asinh_out(self, out);
}
at::Tensor & asinh_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__asinh_(self);
}
at::Tensor atanh(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__atanh(self);
}
at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_atanh_out(self, out);
}
at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_atanh_out(self, out);
}
at::Tensor & atanh_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__atanh_(self);
}
at::Tensor asin(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__asin(self);
}
at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_asin_out(self, out);
}
at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_asin_out(self, out);
}
at::Tensor & asin_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__asin_(self);
}
at::Tensor atan(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__atan(self);
}
at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_atan_out(self, out);
}
at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_atan_out(self, out);
}
at::Tensor & atan_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__atan_(self);
}
at::Tensor & baddbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA_out_baddbmm_out(self, batch1, batch2, beta, alpha, out);
}
at::Tensor & baddbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_baddbmm_out(self, batch1, batch2, beta, alpha, out);
}
at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_SparseCsrCUDA_out_bmm_out(self, mat2, out);
}
at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_bmm_out(self, mat2, out);
}
at::Tensor ceil(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__ceil(self);
}
at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_ceil_out(self, out);
}
at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_ceil_out(self, out);
}
at::Tensor & ceil_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__ceil_(self);
}
at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
return wrapper_SparseCsrCUDA__copy_(self, src, non_blocking);
}
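// Factory-style entries (empty, empty_like, resize_) are emitted in several
// forms: a packed at::TensorOptions overload, an unpacked
// dtype/layout/device/pin_memory overload, and, where sizes can be symbolic,
// a *_symint variant. All of them unpack their arguments and funnel into the
// same wrapper_SparseCsrCUDA_* function.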
at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
}
at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
}
const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA__resize_(self, c10::fromIntArrayRefSlow(size), memory_format);
}
const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA__resize_(self, size, memory_format);
}
at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA__empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
at::Tensor erf(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__erf(self);
}
at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_erf_out(self, out);
}
at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_erf_out(self, out);
}
at::Tensor & erf_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__erf_(self);
}
at::Tensor expm1(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__expm1(self);
}
at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_expm1_out(self, out);
}
at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_expm1_out(self, out);
}
at::Tensor & expm1_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__expm1_(self);
}
at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) {
return wrapper_SparseCsrCUDA_Scalar_fill_(self, value);
}
at::Tensor floor(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__floor(self);
}
at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_floor_out(self, out);
}
at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_floor_out(self, out);
}
at::Tensor & floor_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__floor_(self);
}
at::Tensor frac(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__frac(self);
}
at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_frac_out(self, out);
}
at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_frac_out(self, out);
}
at::Tensor & frac_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__frac_(self);
}
at::Tensor isnan(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__isnan(self);
}
at::Tensor log1p(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__log1p(self);
}
at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_log1p_out(self, out);
}
at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_log1p_out(self, out);
}
at::Tensor & log1p_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__log1p_(self);
}
at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_SparseCsrCUDA__mm(self, mat2);
}
at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_SparseCsrCUDA_out_mm_out(self, mat2, out);
}
at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_mm_out(self, mat2, out);
}
at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCUDA_Tensor_mul(self, other);
}
at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCUDA_out_mul_out(self, other, out);
}
at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_mul_out(self, other, out);
}
at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCUDA_Tensor_mul_(self, other);
}
at::Tensor mul(const at::Tensor & self, const at::Scalar & other) {
return wrapper_SparseCsrCUDA_Scalar_mul(self, other);
}
at::Tensor & mul_(at::Tensor & self, const at::Scalar & other) {
return wrapper_SparseCsrCUDA_Scalar_mul_(self, other);
}
at::Tensor rad2deg(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__rad2deg(self);
}
at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_rad2deg_out(self, out);
}
at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_rad2deg_out(self, out);
}
at::Tensor & rad2deg_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__rad2deg_(self);
}
at::Tensor deg2rad(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__deg2rad(self);
}
at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_deg2rad_out(self, out);
}
at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_deg2rad_out(self, out);
}
at::Tensor & deg2rad_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__deg2rad_(self);
}
at::Tensor neg(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__neg(self);
}
at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_neg_out(self, out);
}
at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_neg_out(self, out);
}
at::Tensor & neg_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__neg_(self);
}
at::Tensor round(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__round(self);
}
at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_round_out(self, out);
}
at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_round_out(self, out);
}
at::Tensor & round_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__round_(self);
}
at::Tensor relu(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__relu(self);
}
at::Tensor & relu_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__relu_(self);
}
at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) {
return wrapper_SparseCsrCUDA_int_select(self, dim, index);
}
at::Tensor select_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
return wrapper_SparseCsrCUDA_int_select(self, dim, index);
}
at::Tensor sin(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__sin(self);
}
at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_sin_out(self, out);
}
at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_sin_out(self, out);
}
at::Tensor & sin_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__sin_(self);
}
at::Tensor sinh(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__sinh(self);
}
at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_sinh_out(self, out);
}
at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_sinh_out(self, out);
}
at::Tensor & sinh_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__sinh_(self);
}
at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCUDA__sum(self, dtype);
}
at::Tensor sqrt(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__sqrt(self);
}
at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_sqrt_out(self, out);
}
at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_sqrt_out(self, out);
}
at::Tensor & sqrt_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__sqrt_(self);
}
at::Tensor tan(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__tan(self);
}
at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_tan_out(self, out);
}
at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_tan_out(self, out);
}
at::Tensor & tan_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__tan_(self);
}
at::Tensor tanh(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__tanh(self);
}
at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_tanh_out(self, out);
}
at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_tanh_out(self, out);
}
at::Tensor & tanh_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__tanh_(self);
}
at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCsrCUDA__threshold_backward(grad_output, self, threshold);
}
at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCsrCUDA_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
return wrapper_SparseCsrCUDA_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
at::Tensor trunc(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__trunc(self);
}
at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_trunc_out(self, out);
}
at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_trunc_out(self, out);
}
at::Tensor & trunc_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__trunc_(self);
}
at::Tensor _sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_sum(self, dim, keepdim, dtype);
}
at::Tensor _sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_prod(self, dim, keepdim, dtype);
}
at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCUDA__clone(self, memory_format);
}
const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
return wrapper_SparseCsrCUDA__resize_as_sparse_(self, the_template);
}
at::Tensor & zero_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__zero_(self);
}
at::Tensor sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA__sparse_sampled_addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & sparse_sampled_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & sparse_sampled_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA__addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCUDA_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
return wrapper_SparseCsrCUDA__sparse_mask(self, mask);
}
at::Tensor _to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCUDA___to_dense(self, dtype);
}
int64_t sparse_dim(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__sparse_dim(self);
}
int64_t dense_dim(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__dense_dim(self);
}
int64_t _nnz(const at::Tensor & self) {
return wrapper_SparseCsrCUDA___nnz(self);
}
at::Tensor values(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__values(self);
}
at::Tensor crow_indices(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__crow_indices(self);
}
at::Tensor col_indices(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__col_indices(self);
}
at::Tensor ccol_indices(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__ccol_indices(self);
}
at::Tensor row_indices(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__row_indices(self);
}
at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) {
return wrapper_SparseCsrCUDA_sparse_dim_to_sparse(self, sparse_dim);
}
at::Tensor to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCUDA__to_sparse(self, layout, blocksize, dense_dim);
}
at::Tensor to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCUDA__to_sparse_csr(self, dense_dim);
}
at::Tensor to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCUDA__to_sparse_csc(self, dense_dim);
}
at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCUDA__to_sparse_bsr(self, blocksize, dense_dim);
}
at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCUDA__to_sparse_bsc(self, blocksize, dense_dim);
}
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
return wrapper_SparseCsrCUDA_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M);
}
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
return wrapper_SparseCsrCUDA_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M);
}
at::Tensor erfinv(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__erfinv(self);
}
at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_erfinv_out(self, out);
}
at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_erfinv_out(self, out);
}
at::Tensor & erfinv_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__erfinv_(self);
}
at::Tensor sign(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__sign(self);
}
at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_sign_out(self, out);
}
at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_sign_out(self, out);
}
at::Tensor & sign_(at::Tensor & self) {
return wrapper_SparseCsrCUDA__sign_(self);
}
at::Tensor signbit(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__signbit(self);
}
at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_signbit_out(self, out);
}
at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_signbit_out(self, out);
}
at::Tensor & normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
return wrapper_SparseCsrCUDA__normal_(self, mean, std, generator);
}
at::Tensor isinf(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__isinf(self);
}
at::Tensor isposinf(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__isposinf(self);
}
at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_isposinf_out(self, out);
}
at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_isposinf_out(self, out);
}
at::Tensor isneginf(const at::Tensor & self) {
return wrapper_SparseCsrCUDA__isneginf(self);
}
at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCUDA_out_isneginf_out(self, out);
}
at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCUDA_out_isneginf_out(self, out);
}
at::Tensor select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
return wrapper_SparseCsrCUDA_int_select_copy(self, dim, index);
}
at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
return wrapper_SparseCsrCUDA_int_select_copy(self, dim, index);
}
} // namespace sparsecsrcuda
} // namespace at