1// required for old g++ to compile PRId64 macros, see
2// https://github.com/pytorch/pytorch/issues/3571
3// for context
4#ifndef __STDC_FORMAT_MACROS
5#define __STDC_FORMAT_MACROS
6#endif
7
8// an external backend might generate file within its code tree
9// and check all the source files within the tree with clang-format.
10// so, disable it since the backend might have a different config.
11// clang-format off
12
13// NOTE: This condition is true for all PyTorch internal libraries, it
14// just excludes external projects such as torch_xla which
15// re-use some of the PyTorch codegen machinery.
16#if defined(CAFFE2_BUILD_MAIN_LIB) || \
17 defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
18 defined(TORCH_HIP_BUILD_MAIN_LIB) || \
19 defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
20 defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
21#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
22#endif
23
24// @generated by torchgen/gen.py from RegisterDispatchKey.cpp
25
26#include <c10/core/TensorImpl.h>
27#include <c10/core/Allocator.h>
28#include <ATen/DeviceGuard.h>
29#include <ATen/NamedTensorUtils.h>
30#include <ATen/Utils.h>
31#include <ATen/WrapDimUtils.h>
32#include <ATen/Dispatch.h>
33#include <c10/util/ExclusivelyOwned.h>
34#include <c10/util/Half.h>
35#include <c10/core/UndefinedTensorImpl.h>
36#include <c10/util/Optional.h>
37#include <ATen/Tensor.h>
38#include <ATen/native/Resize.h>
39
40#include <cstddef>
41#include <functional>
42#include <memory>
43#include <utility>
44
45#include <ATen/Config.h>
46#include <ATen/core/op_registration/adaption.h>
47#include <torch/library.h>
48
49
50#include <ATen/ops/as_strided_native.h>
51#include <ATen/ops/empty.h>
52#include <ATen/ops/empty_strided.h>
53#include <ATen/ops/_copy_from_and_resize.h>
54#include <ATen/ops/_copy_from.h>
55#include <ATen/ops/_conj_physical_native.h>
56#include <ATen/ops/_nnz_native.h>
57#include <ATen/ops/_sparse_csr_prod_native.h>
58#include <ATen/ops/_sparse_csr_sum_native.h>
59#include <ATen/ops/_sparse_mm_reduce_impl_backward_native.h>
60#include <ATen/ops/_sparse_mm_reduce_impl_native.h>
61#include <ATen/ops/_to_dense_native.h>
62#include <ATen/ops/abs_native.h>
63#include <ATen/ops/add_native.h>
64#include <ATen/ops/addmm_native.h>
65#include <ATen/ops/addmv_native.h>
66#include <ATen/ops/angle_native.h>
67#include <ATen/ops/asin_native.h>
68#include <ATen/ops/asinh_native.h>
69#include <ATen/ops/atan_native.h>
70#include <ATen/ops/atanh_native.h>
71#include <ATen/ops/ccol_indices_native.h>
72#include <ATen/ops/ceil_native.h>
73#include <ATen/ops/clone_native.h>
74#include <ATen/ops/col_indices_native.h>
75#include <ATen/ops/conj_physical_native.h>
76#include <ATen/ops/copy_native.h>
77#include <ATen/ops/crow_indices_native.h>
78#include <ATen/ops/deg2rad_native.h>
79#include <ATen/ops/dense_dim_native.h>
80#include <ATen/ops/empty_like_native.h>
81#include <ATen/ops/empty_native.h>
82#include <ATen/ops/erf_native.h>
83#include <ATen/ops/erfinv_native.h>
84#include <ATen/ops/expm1_native.h>
85#include <ATen/ops/fill_native.h>
86#include <ATen/ops/floor_native.h>
87#include <ATen/ops/frac_native.h>
88#include <ATen/ops/isinf_native.h>
89#include <ATen/ops/isnan_native.h>
90#include <ATen/ops/isneginf_native.h>
91#include <ATen/ops/isposinf_native.h>
92#include <ATen/ops/log1p_native.h>
93#include <ATen/ops/mm_native.h>
94#include <ATen/ops/mul_native.h>
95#include <ATen/ops/neg_native.h>
96#include <ATen/ops/normal_native.h>
97#include <ATen/ops/rad2deg_native.h>
98#include <ATen/ops/relu_native.h>
99#include <ATen/ops/resize_as_sparse_native.h>
100#include <ATen/ops/resize_native.h>
101#include <ATen/ops/round_native.h>
102#include <ATen/ops/row_indices_native.h>
103#include <ATen/ops/select_copy_native.h>
104#include <ATen/ops/select_native.h>
105#include <ATen/ops/sgn_native.h>
106#include <ATen/ops/sign_native.h>
107#include <ATen/ops/signbit_native.h>
108#include <ATen/ops/sin_native.h>
109#include <ATen/ops/sinh_native.h>
110#include <ATen/ops/sparse_dim_native.h>
111#include <ATen/ops/sparse_mask_native.h>
112#include <ATen/ops/sparse_sampled_addmm_native.h>
113#include <ATen/ops/sqrt_native.h>
114#include <ATen/ops/sum_native.h>
115#include <ATen/ops/tan_native.h>
116#include <ATen/ops/tanh_native.h>
117#include <ATen/ops/threshold_backward_native.h>
118#include <ATen/ops/to_sparse_bsc_native.h>
119#include <ATen/ops/to_sparse_bsr_native.h>
120#include <ATen/ops/to_sparse_csc_native.h>
121#include <ATen/ops/to_sparse_csr_native.h>
122#include <ATen/ops/to_sparse_native.h>
123#include <ATen/ops/triangular_solve_native.h>
124#include <ATen/ops/trunc_native.h>
125#include <ATen/ops/values_native.h>
126#include <ATen/ops/zero_native.h>
127
128// See template file RegisterDispatchDefinitions.ini
129namespace at {
130// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
131// ambiguity with conflicting identifiers that may have been defined in
132// at namespace already.
133namespace {
// Prepares an out= tensor for a structured kernel: verifies that `out`
// already has the dtype/device computed by the meta function (`options`),
// then resizes it to `sizes`. The `strides` argument is advisory and is
// only applied when an actual resize happened (see comment below).
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      // Explicit strides and a memory_format are mutually exclusive outputs
      // of the meta function; both being set would be a codegen bug.
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
// Validates an in-place (method_) call: `self` must already match the
// dtype/device/sizes the meta function computed, since an in-place op
// cannot re-type or resize its input.
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
// SparseCsrCPU bindings for the pointwise ops abs, angle, sgn and
// conj_physical (functional, out= and, where present, in-place variants).
// Each wrapper forwards directly to the at::native sparse-CSR kernel;
// codegen emitted no device check and no DeviceGuard for these entries.
namespace {
at::Tensor wrapper_SparseCsrCPU__abs(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_abs_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__abs_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__angle(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::angle_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_angle_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::angle_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__sgn(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sgn_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sgn_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sgn_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sgn_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sgn_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU___conj_physical(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__conj_physical_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for add.Tensor (functional/out/in-place) and
// addmv.out. Thin forwards to the native sparse-CSR/compressed kernels.
namespace {
at::Tensor wrapper_SparseCsrCPU_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::add_sparse_csr(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::add_out_sparse_csr_cpu(self, other, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::add_sparse_csr_(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_addmv_out(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  // "compressed" kernel: shared across CSR/CSC/BSR/BSC layouts.
  return at::native::addmv_out_sparse_compressed(self, mat, vec, beta, alpha, out);
}
} // anonymous namespace
// SparseCsrCPU bindings for the inverse trig/hyperbolic pointwise ops
// asinh, atanh, asin, atan (functional, out= and in-place variants).
namespace {
at::Tensor wrapper_SparseCsrCPU__asinh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::asinh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_asinh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::asinh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__asinh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::asinh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__atanh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atanh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_atanh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::atanh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__atanh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atanh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__asin(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::asin_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_asin_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::asin_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__asin_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::asin_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__atan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atan_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_atan_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::atan_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__atan_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atan_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for ceil (functional/out/in-place) and copy_.
namespace {
at::Tensor wrapper_SparseCsrCPU__ceil(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::ceil_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_ceil_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ceil_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__ceil_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::ceil_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  // Shared across all sparse compressed layouts (CSR/CSC/BSR/BSC).
  return at::native::copy_sparse_compressed_(self, src, non_blocking);
}
} // anonymous namespace
// SparseCsrCPU factory/shape ops: empty.memory_format, resize_ and
// empty_like. SymInt sizes are materialized to concrete ints via
// C10_AS_INTARRAYREF_SLOW before calling the native kernel.
namespace {
at::Tensor wrapper_SparseCsrCPU_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_sparse_compressed(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
// Returns const Tensor& per the generated resize_ signature convention.
const at::Tensor & wrapper_SparseCsrCPU__resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::resize_sparse_csr_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_like_sparse_csr(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
// SparseCsrCPU bindings for erf and expm1 (functional/out/in-place) and
// fill_.Scalar.
namespace {
at::Tensor wrapper_SparseCsrCPU__erf(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::erf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_erf_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::erf_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__erf_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::erf_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__expm1(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::expm1_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_expm1_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::expm1_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__expm1_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::expm1_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_Scalar_fill_(at::Tensor & self, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::fill_sparse_csr_(self, value);
}
} // anonymous namespace
// SparseCsrCPU bindings for floor and frac (functional/out/in-place)
// and the isnan predicate.
namespace {
at::Tensor wrapper_SparseCsrCPU__floor(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_floor_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__floor_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__frac(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_frac_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__frac_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__isnan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::isnan_sparse_csr(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for log1p (functional/out/in-place) and mm
// (functional and out= variants).
namespace {
at::Tensor wrapper_SparseCsrCPU__log1p(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_log1p_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__log1p_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__mm(const at::Tensor & self, const at::Tensor & mat2) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_mm(self, mat2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_mm_out(self, mat2, out);
}
} // anonymous namespace
// SparseCsrCPU bindings for mul: Tensor-Tensor (functional/out/in-place)
// and Tensor-Scalar (functional/in-place) overloads.
namespace {
at::Tensor wrapper_SparseCsrCPU_Tensor_mul(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_out_sparse_csr(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_Tensor_mul_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_sparse_csr_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_Scalar_mul(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_scalar_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_Scalar_mul_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul__scalar_sparse_csr(self, other);
}
} // anonymous namespace
// SparseCsrCPU bindings for the angle-unit conversions rad2deg and
// deg2rad (functional, out= and in-place variants).
namespace {
at::Tensor wrapper_SparseCsrCPU__rad2deg(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__rad2deg_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__deg2rad(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__deg2rad_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for neg and round (functional, out= and
// in-place variants).
namespace {
at::Tensor wrapper_SparseCsrCPU__neg(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_neg_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__neg_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__round(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_round_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__round_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for relu (functional/in-place) and select.int.
namespace {
at::Tensor wrapper_SparseCsrCPU__relu(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__relu_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_int_select(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  // No device check
  // DeviceGuard omitted
  // expect_int() materializes the SymInt index; this kernel does not
  // support a genuinely symbolic index.
  return at::native::select_sparse_csr(self, dim, index.expect_int());
}
} // anonymous namespace
// SparseCsrCPU bindings for sin and sinh (functional, out= and
// in-place variants).
namespace {
at::Tensor wrapper_SparseCsrCPU__sin(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sin_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sin_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__sinh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sinh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sinh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for the full sum reduction and for sqrt
// (functional, out= and in-place variants).
namespace {
at::Tensor wrapper_SparseCsrCPU__sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::sum_csr(self, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__sqrt(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sqrt_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sqrt_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for tan and tanh (functional, out= and
// in-place variants).
namespace {
at::Tensor wrapper_SparseCsrCPU__tan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_tan_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__tan_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__tanh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_tanh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__tanh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for threshold_backward (functional and
// grad_input-out variants) and trunc (functional/out/in-place).
namespace {
at::Tensor wrapper_SparseCsrCPU__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
  // No device check
  // DeviceGuard omitted
  return at::native::threshold_backward_sparse_compressed(grad_output, self, threshold);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
  // No device check
  // DeviceGuard omitted
  return at::native::threshold_backward_sparse_compressed_out(grad_output, self, threshold, grad_input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__trunc(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_trunc_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__trunc_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for the dimensioned CSR reductions
// (_sparse_csr_sum / _sparse_csr_prod), clone, resize_as_sparse_ and
// zero_.
namespace {
at::Tensor wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_sum_cpu(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_prod_cpu(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::clone_sparse_compressed(self, memory_format);
}
} // anonymous namespace
namespace {
// Returns const Tensor& per the generated resize-family signature convention.
const at::Tensor & wrapper_SparseCsrCPU__resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
  // No device check
  // DeviceGuard omitted
  return at::native::resize_as_sparse_compressed_(self, the_template);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__zero_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::zero_sparse_csr_(self);
}
} // anonymous namespace
// SparseCsrCPU bindings for sparse_sampled_addmm (functional/out) and
// the _sparse_mm_reduce_impl forward/backward pair (each returning a
// tuple of two tensors).
namespace {
at::Tensor wrapper_SparseCsrCPU__sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_sampled_addmm_sparse_csr_cpu(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_sampled_addmm_out_sparse_csr_cpu(self, mat1, mat2, beta, alpha, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_SparseCsrCPU___sparse_mm_reduce_impl(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_mm_reduce_impl_sparse_csr_cpu(self, other, reduce);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_mm_reduce_impl_backward_sparse_csr_cpu(self, grad_out, weight, reduce, arg_out, output_mask);
}
} // anonymous namespace
890namespace {
891at::Tensor wrapper_SparseCsrCPU__addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
892 // No device check
893 // DeviceGuard omitted
894 return at::native::addmm_sparse_compressed_dense(self, mat1, mat2, beta, alpha);
895}
896} // anonymous namespace
897namespace {
898at::Tensor & wrapper_SparseCsrCPU_out_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
899 // No device check
900 // DeviceGuard omitted
901 return at::native::addmm_out_sparse_compressed_cpu(self, mat1, mat2, beta, alpha, out);
902}
903} // anonymous namespace
904namespace {
905at::Tensor wrapper_SparseCsrCPU__sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
906 // No device check
907 // DeviceGuard omitted
908 return at::native::sparse_mask_sparse_csr(self, mask);
909}
910} // anonymous namespace
911namespace {
912at::Tensor wrapper_SparseCsrCPU___to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
913 // No device check
914 // DeviceGuard omitted
915 return at::native::sparse_compressed_to_dense(self, dtype);
916}
917} // anonymous namespace
918namespace {
919int64_t wrapper_SparseCsrCPU__sparse_dim(const at::Tensor & self) {
920 // No device check
921 // DeviceGuard omitted
922 return at::native::sparse_dim_sparse_csr(self);
923}
924} // anonymous namespace
925namespace {
926int64_t wrapper_SparseCsrCPU__dense_dim(const at::Tensor & self) {
927 // No device check
928 // DeviceGuard omitted
929 return at::native::dense_dim_sparse_csr(self);
930}
931} // anonymous namespace
932namespace {
933int64_t wrapper_SparseCsrCPU___nnz(const at::Tensor & self) {
934 // No device check
935 // DeviceGuard omitted
936 return at::native::_nnz_sparse_csr(self);
937}
938} // anonymous namespace
939namespace {
940at::Tensor wrapper_SparseCsrCPU__values(const at::Tensor & self) {
941 // No device check
942 // DeviceGuard omitted
943 return at::native::values_sparse_csr(self);
944}
945} // anonymous namespace
946namespace {
947at::Tensor wrapper_SparseCsrCPU__crow_indices(const at::Tensor & self) {
948 // No device check
949 // DeviceGuard omitted
950 return at::native::crow_indices_sparse_csr(self);
951}
952} // anonymous namespace
953namespace {
954at::Tensor wrapper_SparseCsrCPU__col_indices(const at::Tensor & self) {
955 // No device check
956 // DeviceGuard omitted
957 return at::native::col_indices_sparse_csr(self);
958}
959} // anonymous namespace
960namespace {
961at::Tensor wrapper_SparseCsrCPU__ccol_indices(const at::Tensor & self) {
962 // No device check
963 // DeviceGuard omitted
964 return at::native::ccol_indices_sparse_csr(self);
965}
966} // anonymous namespace
967namespace {
968at::Tensor wrapper_SparseCsrCPU__row_indices(const at::Tensor & self) {
969 // No device check
970 // DeviceGuard omitted
971 return at::native::row_indices_sparse_csr(self);
972}
973} // anonymous namespace
974namespace {
975at::Tensor wrapper_SparseCsrCPU_sparse_dim_to_sparse(const at::Tensor & self, int64_t sparse_dim) {
976 // No device check
977 // DeviceGuard omitted
978 return at::native::sparse_compressed_to_sparse(self, sparse_dim);
979}
980} // anonymous namespace
981namespace {
982at::Tensor wrapper_SparseCsrCPU__to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
983 // No device check
984 // DeviceGuard omitted
985 return at::native::sparse_compressed_to_sparse(self, layout, blocksize, dense_dim);
986}
987} // anonymous namespace
988namespace {
989at::Tensor wrapper_SparseCsrCPU__to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
990 // No device check
991 // DeviceGuard omitted
992 return at::native::sparse_compressed_to_sparse_csr(self, dense_dim);
993}
994} // anonymous namespace
995namespace {
996at::Tensor wrapper_SparseCsrCPU__to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
997 // No device check
998 // DeviceGuard omitted
999 return at::native::sparse_compressed_to_sparse_csc(self, dense_dim);
1000}
1001} // anonymous namespace
1002namespace {
1003at::Tensor wrapper_SparseCsrCPU__to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1004 // No device check
1005 // DeviceGuard omitted
1006 return at::native::sparse_compressed_to_sparse_bsr(self, blocksize, dense_dim);
1007}
1008} // anonymous namespace
1009namespace {
1010at::Tensor wrapper_SparseCsrCPU__to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1011 // No device check
1012 // DeviceGuard omitted
1013 return at::native::sparse_compressed_to_sparse_bsc(self, blocksize, dense_dim);
1014}
1015} // anonymous namespace
1016namespace {
1017::std::tuple<at::Tensor &,at::Tensor &> wrapper_SparseCsrCPU_X_triangular_solve_out(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
1018 // No device check
1019 // DeviceGuard omitted
1020 return at::native::triangular_solve_out_sparse_csr_cpu(self, A, upper, transpose, unitriangular, X, M);
1021}
1022} // anonymous namespace
1023namespace {
1024at::Tensor wrapper_SparseCsrCPU__erfinv(const at::Tensor & self) {
1025 // No device check
1026 // DeviceGuard omitted
1027 return at::native::erfinv_sparse_csr(self);
1028}
1029} // anonymous namespace
1030namespace {
1031at::Tensor & wrapper_SparseCsrCPU_out_erfinv_out(const at::Tensor & self, at::Tensor & out) {
1032 // No device check
1033 // DeviceGuard omitted
1034 return at::native::erfinv_sparse_csr_out(self, out);
1035}
1036} // anonymous namespace
1037namespace {
1038at::Tensor & wrapper_SparseCsrCPU__erfinv_(at::Tensor & self) {
1039 // No device check
1040 // DeviceGuard omitted
1041 return at::native::erfinv_sparse_csr_(self);
1042}
1043} // anonymous namespace
1044namespace {
1045at::Tensor wrapper_SparseCsrCPU__sign(const at::Tensor & self) {
1046 // No device check
1047 // DeviceGuard omitted
1048 return at::native::sign_sparse_csr(self);
1049}
1050} // anonymous namespace
1051namespace {
1052at::Tensor & wrapper_SparseCsrCPU_out_sign_out(const at::Tensor & self, at::Tensor & out) {
1053 // No device check
1054 // DeviceGuard omitted
1055 return at::native::sign_sparse_csr_out(self, out);
1056}
1057} // anonymous namespace
1058namespace {
1059at::Tensor & wrapper_SparseCsrCPU__sign_(at::Tensor & self) {
1060 // No device check
1061 // DeviceGuard omitted
1062 return at::native::sign_sparse_csr_(self);
1063}
1064} // anonymous namespace
1065namespace {
1066at::Tensor wrapper_SparseCsrCPU__signbit(const at::Tensor & self) {
1067 // No device check
1068 // DeviceGuard omitted
1069 return at::native::signbit_sparse_csr(self);
1070}
1071} // anonymous namespace
1072namespace {
1073at::Tensor & wrapper_SparseCsrCPU_out_signbit_out(const at::Tensor & self, at::Tensor & out) {
1074 // No device check
1075 // DeviceGuard omitted
1076 return at::native::signbit_sparse_csr_out(self, out);
1077}
1078} // anonymous namespace
1079namespace {
1080at::Tensor & wrapper_SparseCsrCPU__normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
1081 // No device check
1082 // DeviceGuard omitted
1083 return at::native::normal_sparse_csr_(self, mean, std, generator);
1084}
1085} // anonymous namespace
1086namespace {
1087at::Tensor wrapper_SparseCsrCPU__isinf(const at::Tensor & self) {
1088 // No device check
1089 // DeviceGuard omitted
1090 return at::native::isinf_sparse_csr(self);
1091}
1092} // anonymous namespace
1093namespace {
1094at::Tensor wrapper_SparseCsrCPU__isposinf(const at::Tensor & self) {
1095 // No device check
1096 // DeviceGuard omitted
1097 return at::native::isposinf_sparse_csr(self);
1098}
1099} // anonymous namespace
1100namespace {
1101at::Tensor & wrapper_SparseCsrCPU_out_isposinf_out(const at::Tensor & self, at::Tensor & out) {
1102 // No device check
1103 // DeviceGuard omitted
1104 return at::native::isposinf_sparse_csr_out(self, out);
1105}
1106} // anonymous namespace
1107namespace {
1108at::Tensor wrapper_SparseCsrCPU__isneginf(const at::Tensor & self) {
1109 // No device check
1110 // DeviceGuard omitted
1111 return at::native::isneginf_sparse_csr(self);
1112}
1113} // anonymous namespace
1114namespace {
1115at::Tensor & wrapper_SparseCsrCPU_out_isneginf_out(const at::Tensor & self, at::Tensor & out) {
1116 // No device check
1117 // DeviceGuard omitted
1118 return at::native::isneginf_sparse_csr_out(self, out);
1119}
1120} // anonymous namespace
1121namespace {
1122at::Tensor wrapper_SparseCsrCPU_int_select_copy(const at::Tensor & self, int64_t dim, c10::SymInt index) {
1123 // No device check
1124 // DeviceGuard omitted
1125 return at::native::select_copy_sparse_csr(self, dim, index.expect_int());
1126}
1127} // anonymous namespace
// Registers every wrapper above with the dispatcher under the SparseCsrCPU
// dispatch key. Each schema string is "op" or "op.overload"; these strings
// must match the operator schemas the wrappers were generated for
// (presumably native_functions.yaml entries — generated code, do not edit
// by hand).
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("abs",
TORCH_FN(wrapper_SparseCsrCPU__abs));
m.impl("abs.out",
TORCH_FN(wrapper_SparseCsrCPU_out_abs_out));
m.impl("abs_",
TORCH_FN(wrapper_SparseCsrCPU__abs_));
m.impl("angle",
TORCH_FN(wrapper_SparseCsrCPU__angle));
m.impl("angle.out",
TORCH_FN(wrapper_SparseCsrCPU_out_angle_out));
m.impl("sgn",
TORCH_FN(wrapper_SparseCsrCPU__sgn));
m.impl("sgn.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sgn_out));
m.impl("sgn_",
TORCH_FN(wrapper_SparseCsrCPU__sgn_));
m.impl("_conj_physical",
TORCH_FN(wrapper_SparseCsrCPU___conj_physical));
m.impl("conj_physical.out",
TORCH_FN(wrapper_SparseCsrCPU_out_conj_physical_out));
m.impl("conj_physical_",
TORCH_FN(wrapper_SparseCsrCPU__conj_physical_));
m.impl("add.Tensor",
TORCH_FN(wrapper_SparseCsrCPU_Tensor_add));
m.impl("add.out",
TORCH_FN(wrapper_SparseCsrCPU_out_add_out));
m.impl("add_.Tensor",
TORCH_FN(wrapper_SparseCsrCPU_Tensor_add_));
m.impl("addmv.out",
TORCH_FN(wrapper_SparseCsrCPU_out_addmv_out));
m.impl("asinh",
TORCH_FN(wrapper_SparseCsrCPU__asinh));
m.impl("asinh.out",
TORCH_FN(wrapper_SparseCsrCPU_out_asinh_out));
m.impl("asinh_",
TORCH_FN(wrapper_SparseCsrCPU__asinh_));
m.impl("atanh",
TORCH_FN(wrapper_SparseCsrCPU__atanh));
m.impl("atanh.out",
TORCH_FN(wrapper_SparseCsrCPU_out_atanh_out));
m.impl("atanh_",
TORCH_FN(wrapper_SparseCsrCPU__atanh_));
m.impl("asin",
TORCH_FN(wrapper_SparseCsrCPU__asin));
m.impl("asin.out",
TORCH_FN(wrapper_SparseCsrCPU_out_asin_out));
m.impl("asin_",
TORCH_FN(wrapper_SparseCsrCPU__asin_));
m.impl("atan",
TORCH_FN(wrapper_SparseCsrCPU__atan));
m.impl("atan.out",
TORCH_FN(wrapper_SparseCsrCPU_out_atan_out));
m.impl("atan_",
TORCH_FN(wrapper_SparseCsrCPU__atan_));
m.impl("ceil",
TORCH_FN(wrapper_SparseCsrCPU__ceil));
m.impl("ceil.out",
TORCH_FN(wrapper_SparseCsrCPU_out_ceil_out));
m.impl("ceil_",
TORCH_FN(wrapper_SparseCsrCPU__ceil_));
m.impl("copy_",
TORCH_FN(wrapper_SparseCsrCPU__copy_));
m.impl("empty.memory_format",
TORCH_FN(wrapper_SparseCsrCPU_memory_format_empty));
m.impl("resize_",
TORCH_FN(wrapper_SparseCsrCPU__resize_));
m.impl("empty_like",
TORCH_FN(wrapper_SparseCsrCPU__empty_like));
m.impl("erf",
TORCH_FN(wrapper_SparseCsrCPU__erf));
m.impl("erf.out",
TORCH_FN(wrapper_SparseCsrCPU_out_erf_out));
m.impl("erf_",
TORCH_FN(wrapper_SparseCsrCPU__erf_));
m.impl("expm1",
TORCH_FN(wrapper_SparseCsrCPU__expm1));
m.impl("expm1.out",
TORCH_FN(wrapper_SparseCsrCPU_out_expm1_out));
m.impl("expm1_",
TORCH_FN(wrapper_SparseCsrCPU__expm1_));
m.impl("fill_.Scalar",
TORCH_FN(wrapper_SparseCsrCPU_Scalar_fill_));
m.impl("floor",
TORCH_FN(wrapper_SparseCsrCPU__floor));
m.impl("floor.out",
TORCH_FN(wrapper_SparseCsrCPU_out_floor_out));
m.impl("floor_",
TORCH_FN(wrapper_SparseCsrCPU__floor_));
m.impl("frac",
TORCH_FN(wrapper_SparseCsrCPU__frac));
m.impl("frac.out",
TORCH_FN(wrapper_SparseCsrCPU_out_frac_out));
m.impl("frac_",
TORCH_FN(wrapper_SparseCsrCPU__frac_));
m.impl("isnan",
TORCH_FN(wrapper_SparseCsrCPU__isnan));
m.impl("log1p",
TORCH_FN(wrapper_SparseCsrCPU__log1p));
m.impl("log1p.out",
TORCH_FN(wrapper_SparseCsrCPU_out_log1p_out));
m.impl("log1p_",
TORCH_FN(wrapper_SparseCsrCPU__log1p_));
m.impl("mm",
TORCH_FN(wrapper_SparseCsrCPU__mm));
m.impl("mm.out",
TORCH_FN(wrapper_SparseCsrCPU_out_mm_out));
m.impl("mul.Tensor",
TORCH_FN(wrapper_SparseCsrCPU_Tensor_mul));
m.impl("mul.out",
TORCH_FN(wrapper_SparseCsrCPU_out_mul_out));
m.impl("mul_.Tensor",
TORCH_FN(wrapper_SparseCsrCPU_Tensor_mul_));
m.impl("mul.Scalar",
TORCH_FN(wrapper_SparseCsrCPU_Scalar_mul));
m.impl("mul_.Scalar",
TORCH_FN(wrapper_SparseCsrCPU_Scalar_mul_));
m.impl("rad2deg",
TORCH_FN(wrapper_SparseCsrCPU__rad2deg));
m.impl("rad2deg.out",
TORCH_FN(wrapper_SparseCsrCPU_out_rad2deg_out));
m.impl("rad2deg_",
TORCH_FN(wrapper_SparseCsrCPU__rad2deg_));
m.impl("deg2rad",
TORCH_FN(wrapper_SparseCsrCPU__deg2rad));
m.impl("deg2rad.out",
TORCH_FN(wrapper_SparseCsrCPU_out_deg2rad_out));
m.impl("deg2rad_",
TORCH_FN(wrapper_SparseCsrCPU__deg2rad_));
m.impl("neg",
TORCH_FN(wrapper_SparseCsrCPU__neg));
m.impl("neg.out",
TORCH_FN(wrapper_SparseCsrCPU_out_neg_out));
m.impl("neg_",
TORCH_FN(wrapper_SparseCsrCPU__neg_));
m.impl("round",
TORCH_FN(wrapper_SparseCsrCPU__round));
m.impl("round.out",
TORCH_FN(wrapper_SparseCsrCPU_out_round_out));
m.impl("round_",
TORCH_FN(wrapper_SparseCsrCPU__round_));
m.impl("relu",
TORCH_FN(wrapper_SparseCsrCPU__relu));
m.impl("relu_",
TORCH_FN(wrapper_SparseCsrCPU__relu_));
m.impl("select.int",
TORCH_FN(wrapper_SparseCsrCPU_int_select));
m.impl("sin",
TORCH_FN(wrapper_SparseCsrCPU__sin));
m.impl("sin.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sin_out));
m.impl("sin_",
TORCH_FN(wrapper_SparseCsrCPU__sin_));
m.impl("sinh",
TORCH_FN(wrapper_SparseCsrCPU__sinh));
m.impl("sinh.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sinh_out));
m.impl("sinh_",
TORCH_FN(wrapper_SparseCsrCPU__sinh_));
m.impl("sum",
TORCH_FN(wrapper_SparseCsrCPU__sum));
m.impl("sqrt",
TORCH_FN(wrapper_SparseCsrCPU__sqrt));
m.impl("sqrt.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sqrt_out));
m.impl("sqrt_",
TORCH_FN(wrapper_SparseCsrCPU__sqrt_));
m.impl("tan",
TORCH_FN(wrapper_SparseCsrCPU__tan));
m.impl("tan.out",
TORCH_FN(wrapper_SparseCsrCPU_out_tan_out));
m.impl("tan_",
TORCH_FN(wrapper_SparseCsrCPU__tan_));
m.impl("tanh",
TORCH_FN(wrapper_SparseCsrCPU__tanh));
m.impl("tanh.out",
TORCH_FN(wrapper_SparseCsrCPU_out_tanh_out));
m.impl("tanh_",
TORCH_FN(wrapper_SparseCsrCPU__tanh_));
m.impl("threshold_backward",
TORCH_FN(wrapper_SparseCsrCPU__threshold_backward));
m.impl("threshold_backward.grad_input",
TORCH_FN(wrapper_SparseCsrCPU_grad_input_threshold_backward_out));
m.impl("trunc",
TORCH_FN(wrapper_SparseCsrCPU__trunc));
m.impl("trunc.out",
TORCH_FN(wrapper_SparseCsrCPU_out_trunc_out));
m.impl("trunc_",
TORCH_FN(wrapper_SparseCsrCPU__trunc_));
m.impl("_sparse_csr_sum.dim_dtype",
TORCH_FN(wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum));
m.impl("_sparse_csr_prod.dim_dtype",
TORCH_FN(wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod));
m.impl("clone",
TORCH_FN(wrapper_SparseCsrCPU__clone));
m.impl("resize_as_sparse_",
TORCH_FN(wrapper_SparseCsrCPU__resize_as_sparse_));
m.impl("zero_",
TORCH_FN(wrapper_SparseCsrCPU__zero_));
m.impl("sparse_sampled_addmm",
TORCH_FN(wrapper_SparseCsrCPU__sparse_sampled_addmm));
m.impl("sparse_sampled_addmm.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out));
m.impl("_sparse_mm_reduce_impl",
TORCH_FN(wrapper_SparseCsrCPU___sparse_mm_reduce_impl));
m.impl("_sparse_mm_reduce_impl_backward",
TORCH_FN(wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward));
m.impl("addmm",
TORCH_FN(wrapper_SparseCsrCPU__addmm));
m.impl("addmm.out",
TORCH_FN(wrapper_SparseCsrCPU_out_addmm_out));
m.impl("sparse_mask",
TORCH_FN(wrapper_SparseCsrCPU__sparse_mask));
m.impl("_to_dense",
TORCH_FN(wrapper_SparseCsrCPU___to_dense));
m.impl("sparse_dim",
TORCH_FN(wrapper_SparseCsrCPU__sparse_dim));
m.impl("dense_dim",
TORCH_FN(wrapper_SparseCsrCPU__dense_dim));
m.impl("_nnz",
TORCH_FN(wrapper_SparseCsrCPU___nnz));
m.impl("values",
TORCH_FN(wrapper_SparseCsrCPU__values));
m.impl("crow_indices",
TORCH_FN(wrapper_SparseCsrCPU__crow_indices));
m.impl("col_indices",
TORCH_FN(wrapper_SparseCsrCPU__col_indices));
m.impl("ccol_indices",
TORCH_FN(wrapper_SparseCsrCPU__ccol_indices));
m.impl("row_indices",
TORCH_FN(wrapper_SparseCsrCPU__row_indices));
m.impl("to_sparse.sparse_dim",
TORCH_FN(wrapper_SparseCsrCPU_sparse_dim_to_sparse));
m.impl("to_sparse",
TORCH_FN(wrapper_SparseCsrCPU__to_sparse));
m.impl("to_sparse_csr",
TORCH_FN(wrapper_SparseCsrCPU__to_sparse_csr));
m.impl("to_sparse_csc",
TORCH_FN(wrapper_SparseCsrCPU__to_sparse_csc));
m.impl("to_sparse_bsr",
TORCH_FN(wrapper_SparseCsrCPU__to_sparse_bsr));
m.impl("to_sparse_bsc",
TORCH_FN(wrapper_SparseCsrCPU__to_sparse_bsc));
m.impl("triangular_solve.X",
TORCH_FN(wrapper_SparseCsrCPU_X_triangular_solve_out));
m.impl("erfinv",
TORCH_FN(wrapper_SparseCsrCPU__erfinv));
m.impl("erfinv.out",
TORCH_FN(wrapper_SparseCsrCPU_out_erfinv_out));
m.impl("erfinv_",
TORCH_FN(wrapper_SparseCsrCPU__erfinv_));
m.impl("sign",
TORCH_FN(wrapper_SparseCsrCPU__sign));
m.impl("sign.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sign_out));
m.impl("sign_",
TORCH_FN(wrapper_SparseCsrCPU__sign_));
m.impl("signbit",
TORCH_FN(wrapper_SparseCsrCPU__signbit));
m.impl("signbit.out",
TORCH_FN(wrapper_SparseCsrCPU_out_signbit_out));
m.impl("normal_",
TORCH_FN(wrapper_SparseCsrCPU__normal_));
m.impl("isinf",
TORCH_FN(wrapper_SparseCsrCPU__isinf));
m.impl("isposinf",
TORCH_FN(wrapper_SparseCsrCPU__isposinf));
m.impl("isposinf.out",
TORCH_FN(wrapper_SparseCsrCPU_out_isposinf_out));
m.impl("isneginf",
TORCH_FN(wrapper_SparseCsrCPU__isneginf));
m.impl("isneginf.out",
TORCH_FN(wrapper_SparseCsrCPU_out_isneginf_out));
m.impl("select_copy.int",
TORCH_FN(wrapper_SparseCsrCPU_int_select_copy));
};
1404} // anonymous namespace
1405namespace sparsecsrcpu {
1406at::Tensor abs(const at::Tensor & self) {
1407return wrapper_SparseCsrCPU__abs(self);
1408}
1409at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
1410return wrapper_SparseCsrCPU_out_abs_out(self, out);
1411}
1412at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
1413return wrapper_SparseCsrCPU_out_abs_out(self, out);
1414}
1415at::Tensor & abs_(at::Tensor & self) {
1416return wrapper_SparseCsrCPU__abs_(self);
1417}
1418at::Tensor angle(const at::Tensor & self) {
1419return wrapper_SparseCsrCPU__angle(self);
1420}
1421at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self) {
1422return wrapper_SparseCsrCPU_out_angle_out(self, out);
1423}
1424at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out) {
1425return wrapper_SparseCsrCPU_out_angle_out(self, out);
1426}
1427at::Tensor sgn(const at::Tensor & self) {
1428return wrapper_SparseCsrCPU__sgn(self);
1429}
1430at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) {
1431return wrapper_SparseCsrCPU_out_sgn_out(self, out);
1432}
1433at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) {
1434return wrapper_SparseCsrCPU_out_sgn_out(self, out);
1435}
1436at::Tensor & sgn_(at::Tensor & self) {
1437return wrapper_SparseCsrCPU__sgn_(self);
1438}
1439at::Tensor _conj_physical(const at::Tensor & self) {
1440return wrapper_SparseCsrCPU___conj_physical(self);
1441}
1442at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
1443return wrapper_SparseCsrCPU_out_conj_physical_out(self, out);
1444}
1445at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
1446return wrapper_SparseCsrCPU_out_conj_physical_out(self, out);
1447}
1448at::Tensor & conj_physical_(at::Tensor & self) {
1449return wrapper_SparseCsrCPU__conj_physical_(self);
1450}
1451at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1452return wrapper_SparseCsrCPU_Tensor_add(self, other, alpha);
1453}
1454at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1455return wrapper_SparseCsrCPU_out_add_out(self, other, alpha, out);
1456}
1457at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
1458return wrapper_SparseCsrCPU_out_add_out(self, other, alpha, out);
1459}
1460at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1461return wrapper_SparseCsrCPU_Tensor_add_(self, other, alpha);
1462}
1463at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1464return wrapper_SparseCsrCPU_out_addmv_out(self, mat, vec, beta, alpha, out);
1465}
1466at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
1467return wrapper_SparseCsrCPU_out_addmv_out(self, mat, vec, beta, alpha, out);
1468}
1469at::Tensor asinh(const at::Tensor & self) {
1470return wrapper_SparseCsrCPU__asinh(self);
1471}
1472at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) {
1473return wrapper_SparseCsrCPU_out_asinh_out(self, out);
1474}
1475at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
1476return wrapper_SparseCsrCPU_out_asinh_out(self, out);
1477}
1478at::Tensor & asinh_(at::Tensor & self) {
1479return wrapper_SparseCsrCPU__asinh_(self);
1480}
1481at::Tensor atanh(const at::Tensor & self) {
1482return wrapper_SparseCsrCPU__atanh(self);
1483}
1484at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
1485return wrapper_SparseCsrCPU_out_atanh_out(self, out);
1486}
1487at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
1488return wrapper_SparseCsrCPU_out_atanh_out(self, out);
1489}
1490at::Tensor & atanh_(at::Tensor & self) {
1491return wrapper_SparseCsrCPU__atanh_(self);
1492}
1493at::Tensor asin(const at::Tensor & self) {
1494return wrapper_SparseCsrCPU__asin(self);
1495}
1496at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
1497return wrapper_SparseCsrCPU_out_asin_out(self, out);
1498}
1499at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
1500return wrapper_SparseCsrCPU_out_asin_out(self, out);
1501}
1502at::Tensor & asin_(at::Tensor & self) {
1503return wrapper_SparseCsrCPU__asin_(self);
1504}
1505at::Tensor atan(const at::Tensor & self) {
1506return wrapper_SparseCsrCPU__atan(self);
1507}
1508at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
1509return wrapper_SparseCsrCPU_out_atan_out(self, out);
1510}
1511at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
1512return wrapper_SparseCsrCPU_out_atan_out(self, out);
1513}
1514at::Tensor & atan_(at::Tensor & self) {
1515return wrapper_SparseCsrCPU__atan_(self);
1516}
1517at::Tensor ceil(const at::Tensor & self) {
1518return wrapper_SparseCsrCPU__ceil(self);
1519}
1520at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) {
1521return wrapper_SparseCsrCPU_out_ceil_out(self, out);
1522}
1523at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) {
1524return wrapper_SparseCsrCPU_out_ceil_out(self, out);
1525}
1526at::Tensor & ceil_(at::Tensor & self) {
1527return wrapper_SparseCsrCPU__ceil_(self);
1528}
1529at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
1530return wrapper_SparseCsrCPU__copy_(self, src, non_blocking);
1531}
1532at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1533return wrapper_SparseCsrCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1534}
1535at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1536return wrapper_SparseCsrCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
1537}
1538at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1539return wrapper_SparseCsrCPU_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1540}
1541at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1542return wrapper_SparseCsrCPU_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
1543}
1544const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
1545return wrapper_SparseCsrCPU__resize_(self, c10::fromIntArrayRefSlow(size), memory_format);
1546}
1547const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
1548return wrapper_SparseCsrCPU__resize_(self, size, memory_format);
1549}
1550at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1551return wrapper_SparseCsrCPU__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1552}
1553at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1554return wrapper_SparseCsrCPU__empty_like(self, dtype, layout, device, pin_memory, memory_format);
1555}
1556at::Tensor erf(const at::Tensor & self) {
1557return wrapper_SparseCsrCPU__erf(self);
1558}
1559at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) {
1560return wrapper_SparseCsrCPU_out_erf_out(self, out);
1561}
1562at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) {
1563return wrapper_SparseCsrCPU_out_erf_out(self, out);
1564}
1565at::Tensor & erf_(at::Tensor & self) {
1566return wrapper_SparseCsrCPU__erf_(self);
1567}
1568at::Tensor expm1(const at::Tensor & self) {
1569return wrapper_SparseCsrCPU__expm1(self);
1570}
1571at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
1572return wrapper_SparseCsrCPU_out_expm1_out(self, out);
1573}
1574at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
1575return wrapper_SparseCsrCPU_out_expm1_out(self, out);
1576}
1577at::Tensor & expm1_(at::Tensor & self) {
1578return wrapper_SparseCsrCPU__expm1_(self);
1579}
1580at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) {
1581return wrapper_SparseCsrCPU_Scalar_fill_(self, value);
1582}
1583at::Tensor floor(const at::Tensor & self) {
1584return wrapper_SparseCsrCPU__floor(self);
1585}
1586at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
1587return wrapper_SparseCsrCPU_out_floor_out(self, out);
1588}
1589at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
1590return wrapper_SparseCsrCPU_out_floor_out(self, out);
1591}
1592at::Tensor & floor_(at::Tensor & self) {
1593return wrapper_SparseCsrCPU__floor_(self);
1594}
1595at::Tensor frac(const at::Tensor & self) {
1596return wrapper_SparseCsrCPU__frac(self);
1597}
1598at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
1599return wrapper_SparseCsrCPU_out_frac_out(self, out);
1600}
1601at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
1602return wrapper_SparseCsrCPU_out_frac_out(self, out);
1603}
1604at::Tensor & frac_(at::Tensor & self) {
1605return wrapper_SparseCsrCPU__frac_(self);
1606}
1607at::Tensor isnan(const at::Tensor & self) {
1608return wrapper_SparseCsrCPU__isnan(self);
1609}
1610at::Tensor log1p(const at::Tensor & self) {
1611return wrapper_SparseCsrCPU__log1p(self);
1612}
1613at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
1614return wrapper_SparseCsrCPU_out_log1p_out(self, out);
1615}
1616at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
1617return wrapper_SparseCsrCPU_out_log1p_out(self, out);
1618}
1619at::Tensor & log1p_(at::Tensor & self) {
1620return wrapper_SparseCsrCPU__log1p_(self);
1621}
1622at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
1623return wrapper_SparseCsrCPU__mm(self, mat2);
1624}
1625at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
1626return wrapper_SparseCsrCPU_out_mm_out(self, mat2, out);
1627}
1628at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
1629return wrapper_SparseCsrCPU_out_mm_out(self, mat2, out);
1630}
1631at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
1632return wrapper_SparseCsrCPU_Tensor_mul(self, other);
1633}
1634at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
1635return wrapper_SparseCsrCPU_out_mul_out(self, other, out);
1636}
1637at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
1638return wrapper_SparseCsrCPU_out_mul_out(self, other, out);
1639}
1640at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
1641return wrapper_SparseCsrCPU_Tensor_mul_(self, other);
1642}
1643at::Tensor mul(const at::Tensor & self, const at::Scalar & other) {
1644return wrapper_SparseCsrCPU_Scalar_mul(self, other);
1645}
1646at::Tensor & mul_(at::Tensor & self, const at::Scalar & other) {
1647return wrapper_SparseCsrCPU_Scalar_mul_(self, other);
1648}
// @generated dispatch glue: unary-op entry points forwarding to the
// wrapper_SparseCsrCPU_* shims defined earlier in this file.
// Pattern per op: functional, out-first (*_out), out-last (*_outf),
// and in-place (*_) variants; the two out spellings share one wrapper.

// rad2deg
at::Tensor rad2deg(const at::Tensor & self) {
return wrapper_SparseCsrCPU__rad2deg(self);
}
at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_rad2deg_out(self, out);
}
at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_rad2deg_out(self, out);
}
at::Tensor & rad2deg_(at::Tensor & self) {
return wrapper_SparseCsrCPU__rad2deg_(self);
}
// deg2rad
at::Tensor deg2rad(const at::Tensor & self) {
return wrapper_SparseCsrCPU__deg2rad(self);
}
at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_deg2rad_out(self, out);
}
at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_deg2rad_out(self, out);
}
at::Tensor & deg2rad_(at::Tensor & self) {
return wrapper_SparseCsrCPU__deg2rad_(self);
}
// neg
at::Tensor neg(const at::Tensor & self) {
return wrapper_SparseCsrCPU__neg(self);
}
at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_neg_out(self, out);
}
at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_neg_out(self, out);
}
at::Tensor & neg_(at::Tensor & self) {
return wrapper_SparseCsrCPU__neg_(self);
}
// round
at::Tensor round(const at::Tensor & self) {
return wrapper_SparseCsrCPU__round(self);
}
at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_round_out(self, out);
}
at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_round_out(self, out);
}
at::Tensor & round_(at::Tensor & self) {
return wrapper_SparseCsrCPU__round_(self);
}
// relu: only functional and in-place variants are generated (no out).
at::Tensor relu(const at::Tensor & self) {
return wrapper_SparseCsrCPU__relu(self);
}
at::Tensor & relu_(at::Tensor & self) {
return wrapper_SparseCsrCPU__relu_(self);
}
// @generated dispatch glue: select / sin / sinh / sum entry points
// forwarding to wrapper_SparseCsrCPU_* shims defined earlier in the file.

// select: int64_t and SymInt overloads both route to the same wrapper.
at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) {
return wrapper_SparseCsrCPU_int_select(self, dim, index);
}
// NOTE(review): the SymInt `index` is passed straight through — this
// presumably relies on the wrapper accepting c10::SymInt; confirm
// against the wrapper's signature earlier in this generated file.
at::Tensor select_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
return wrapper_SparseCsrCPU_int_select(self, dim, index);
}
// sin
at::Tensor sin(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sin(self);
}
at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sin_out(self, out);
}
at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sin_out(self, out);
}
at::Tensor & sin_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sin_(self);
}
// sinh
at::Tensor sinh(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sinh(self);
}
at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sinh_out(self, out);
}
at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sinh_out(self, out);
}
at::Tensor & sinh_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sinh_(self);
}
// sum: full reduction with optional result dtype; functional only.
at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU__sum(self, dtype);
}
// @generated dispatch glue: sqrt / tan / tanh / threshold_backward entry
// points forwarding to wrapper_SparseCsrCPU_* shims earlier in the file.

// sqrt
at::Tensor sqrt(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sqrt(self);
}
at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sqrt_out(self, out);
}
at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sqrt_out(self, out);
}
at::Tensor & sqrt_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sqrt_(self);
}
// tan
at::Tensor tan(const at::Tensor & self) {
return wrapper_SparseCsrCPU__tan(self);
}
at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_tan_out(self, out);
}
at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_tan_out(self, out);
}
at::Tensor & tan_(at::Tensor & self) {
return wrapper_SparseCsrCPU__tan_(self);
}
// tanh
at::Tensor tanh(const at::Tensor & self) {
return wrapper_SparseCsrCPU__tanh(self);
}
at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_tanh_out(self, out);
}
at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_tanh_out(self, out);
}
at::Tensor & tanh_(at::Tensor & self) {
return wrapper_SparseCsrCPU__tanh_(self);
}
// threshold_backward: the out parameter is named grad_input here, with
// the same out-first / out-last convention as the other *_out pairs.
at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCsrCPU__threshold_backward(grad_output, self, threshold);
}
at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCsrCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
return wrapper_SparseCsrCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
// @generated dispatch glue: trunc, sparse reductions, and tensor
// maintenance ops forwarding to wrapper_SparseCsrCPU_* shims.

// trunc
at::Tensor trunc(const at::Tensor & self) {
return wrapper_SparseCsrCPU__trunc(self);
}
at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_trunc_out(self, out);
}
at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_trunc_out(self, out);
}
at::Tensor & trunc_(at::Tensor & self) {
return wrapper_SparseCsrCPU__trunc_(self);
}
// Dim-wise CSR reductions with keepdim and optional result dtype.
at::Tensor _sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum(self, dim, keepdim, dtype);
}
at::Tensor _sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod(self, dim, keepdim, dtype);
}
at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU__clone(self, memory_format);
}
// resize_as_sparse_ returns const& (matches the native schema's
// const-Tensor self), unlike the at::Tensor& in-place ops above.
const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
return wrapper_SparseCsrCPU__resize_as_sparse_(self, the_template);
}
at::Tensor & zero_(at::Tensor & self) {
return wrapper_SparseCsrCPU__zero_(self);
}
// @generated dispatch glue: matmul-family entry points forwarding to
// wrapper_SparseCsrCPU_* shims defined earlier in this file.

// sparse_sampled_addmm(self, mat1, mat2, beta, alpha)
at::Tensor sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCPU__sparse_sampled_addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & sparse_sampled_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & sparse_sampled_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out);
}
// _sparse_mm_reduce_impl and its backward return tensor pairs; both
// forward all arguments unchanged.
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
return wrapper_SparseCsrCPU___sparse_mm_reduce_impl(self, other, reduce);
}
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
return wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward(self, grad_out, weight, reduce, arg_out, output_mask);
}
// addmm(self, mat1, mat2, beta, alpha)
at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCPU__addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}
// @generated dispatch glue: masking, densification, and sparse-layout
// introspection entry points forwarding to wrapper_SparseCsrCPU_* shims.

at::Tensor sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
return wrapper_SparseCsrCPU__sparse_mask(self, mask);
}
at::Tensor _to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU___to_dense(self, dtype);
}
// Scalar queries: these return plain int64_t, not Tensors.
int64_t sparse_dim(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sparse_dim(self);
}
int64_t dense_dim(const at::Tensor & self) {
return wrapper_SparseCsrCPU__dense_dim(self);
}
int64_t _nnz(const at::Tensor & self) {
return wrapper_SparseCsrCPU___nnz(self);
}
// Component accessors for compressed-sparse tensors (values plus the
// row/column index tensors of the CSR/CSC layouts).
at::Tensor values(const at::Tensor & self) {
return wrapper_SparseCsrCPU__values(self);
}
at::Tensor crow_indices(const at::Tensor & self) {
return wrapper_SparseCsrCPU__crow_indices(self);
}
at::Tensor col_indices(const at::Tensor & self) {
return wrapper_SparseCsrCPU__col_indices(self);
}
at::Tensor ccol_indices(const at::Tensor & self) {
return wrapper_SparseCsrCPU__ccol_indices(self);
}
at::Tensor row_indices(const at::Tensor & self) {
return wrapper_SparseCsrCPU__row_indices(self);
}
// @generated dispatch glue: layout-conversion and solver entry points
// forwarding to wrapper_SparseCsrCPU_* shims defined earlier in the file.

// to_sparse: the sparse_dim overload and the (layout, blocksize,
// dense_dim) overload dispatch to distinct wrappers.
at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) {
return wrapper_SparseCsrCPU_sparse_dim_to_sparse(self, sparse_dim);
}
at::Tensor to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse(self, layout, blocksize, dense_dim);
}
// Targeted conversions to each compressed layout; the bsr/bsc variants
// additionally take an explicit blocksize.
at::Tensor to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse_csr(self, dense_dim);
}
at::Tensor to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse_csc(self, dense_dim);
}
at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse_bsr(self, blocksize, dense_dim);
}
at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse_bsc(self, blocksize, dense_dim);
}
// triangular_solve: two outputs (X, M); out-first and out-last
// spellings share the single X-variant wrapper.
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
return wrapper_SparseCsrCPU_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M);
}
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
return wrapper_SparseCsrCPU_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M);
}
// @generated dispatch glue: erfinv / sign / signbit entry points
// forwarding to wrapper_SparseCsrCPU_* shims earlier in this file.

// erfinv
at::Tensor erfinv(const at::Tensor & self) {
return wrapper_SparseCsrCPU__erfinv(self);
}
at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_erfinv_out(self, out);
}
at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_erfinv_out(self, out);
}
at::Tensor & erfinv_(at::Tensor & self) {
return wrapper_SparseCsrCPU__erfinv_(self);
}
// sign
at::Tensor sign(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sign(self);
}
at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sign_out(self, out);
}
at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sign_out(self, out);
}
at::Tensor & sign_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sign_(self);
}
// signbit: no in-place variant is generated for this op.
at::Tensor signbit(const at::Tensor & self) {
return wrapper_SparseCsrCPU__signbit(self);
}
at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_signbit_out(self, out);
}
at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_signbit_out(self, out);
}
// @generated dispatch glue: random fill, inf predicates, and select_copy
// entry points forwarding to wrapper_SparseCsrCPU_* shims.

// normal_: in-place random fill; generator is optional.
at::Tensor & normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
return wrapper_SparseCsrCPU__normal_(self, mean, std, generator);
}
// isinf: functional only (no out variant generated).
at::Tensor isinf(const at::Tensor & self) {
return wrapper_SparseCsrCPU__isinf(self);
}
// isposinf / isneginf: functional plus both out spellings.
at::Tensor isposinf(const at::Tensor & self) {
return wrapper_SparseCsrCPU__isposinf(self);
}
at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_isposinf_out(self, out);
}
at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_isposinf_out(self, out);
}
at::Tensor isneginf(const at::Tensor & self) {
return wrapper_SparseCsrCPU__isneginf(self);
}
at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_isneginf_out(self, out);
}
at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_isneginf_out(self, out);
}
// select_copy: like select above, the int64_t and SymInt overloads
// share one wrapper. NOTE(review): SymInt is forwarded as-is —
// presumably the wrapper takes c10::SymInt; confirm its signature.
at::Tensor select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
return wrapper_SparseCsrCPU_int_select_copy(self, dim, index);
}
at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
return wrapper_SparseCsrCPU_int_select_copy(self, dim, index);
}
1949} // namespace sparsecsrcpu
1950} // namespace at
1951