1// required for old g++ to compile PRId64 macros, see
2// https://github.com/pytorch/pytorch/issues/3571
3// for context
4#ifndef __STDC_FORMAT_MACROS
5#define __STDC_FORMAT_MACROS
6#endif
7
8// an external backend might generate file within its code tree
9// and check all the source files within the tree with clang-format.
10// so, disable it since the backend might have a different config.
11// clang-format off
12
13// NOTE: This condition is true for all PyTorch internal libraries, it
14// just excludes external projects such as torch_xla which
15// re-use some of the PyTorch codegen machinery.
16#if defined(CAFFE2_BUILD_MAIN_LIB) || \
17 defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
18 defined(TORCH_HIP_BUILD_MAIN_LIB) || \
19 defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
20 defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
21#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
22#endif
23
24// @generated by torchgen/gen.py from RegisterDispatchKey.cpp
25
26#include <c10/core/TensorImpl.h>
27#include <c10/core/Allocator.h>
28#include <ATen/DeviceGuard.h>
29#include <ATen/NamedTensorUtils.h>
30#include <ATen/Utils.h>
31#include <ATen/WrapDimUtils.h>
32#include <ATen/Dispatch.h>
33#include <c10/util/ExclusivelyOwned.h>
34#include <c10/util/Half.h>
35#include <c10/core/UndefinedTensorImpl.h>
36#include <c10/util/Optional.h>
37#include <ATen/Tensor.h>
38#include <ATen/native/Resize.h>
39
40#include <cstddef>
41#include <functional>
42#include <memory>
43#include <utility>
44
45#include <ATen/Config.h>
46#include <ATen/core/op_registration/adaption.h>
47#include <torch/library.h>
48
49
50#include <ATen/ops/as_strided_native.h>
51#include <ATen/ops/empty.h>
52#include <ATen/ops/empty_strided.h>
53#include <ATen/ops/_copy_from_and_resize.h>
54#include <ATen/ops/_copy_from.h>
55#include <ATen/ops/_coalesce_native.h>
56#include <ATen/ops/_coalesced_native.h>
57#include <ATen/ops/_dimI_native.h>
58#include <ATen/ops/_dimV_native.h>
59#include <ATen/ops/_indices_native.h>
60#include <ATen/ops/_nnz_native.h>
61#include <ATen/ops/_sparse_broadcast_to_native.h>
62#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
63#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
64#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
65#include <ATen/ops/_sparse_log_softmax_native.h>
66#include <ATen/ops/_sparse_softmax_backward_data_native.h>
67#include <ATen/ops/_sparse_softmax_native.h>
68#include <ATen/ops/_sparse_sparse_matmul_native.h>
69#include <ATen/ops/_sparse_sum_backward_native.h>
70#include <ATen/ops/_to_dense_native.h>
71#include <ATen/ops/_values_native.h>
72#include <ATen/ops/abs_native.h>
73#include <ATen/ops/add_native.h>
74#include <ATen/ops/addmm_native.h>
75#include <ATen/ops/any_native.h>
76#include <ATen/ops/asin_native.h>
77#include <ATen/ops/asinh_native.h>
78#include <ATen/ops/atan_native.h>
79#include <ATen/ops/atanh_native.h>
80#include <ATen/ops/bmm_native.h>
81#include <ATen/ops/cat_native.h>
82#include <ATen/ops/ceil_native.h>
83#include <ATen/ops/clone_native.h>
84#include <ATen/ops/conj_physical_native.h>
85#include <ATen/ops/copy_native.h>
86#include <ATen/ops/copy_sparse_to_sparse_native.h>
87#include <ATen/ops/deg2rad_native.h>
88#include <ATen/ops/dense_dim_native.h>
89#include <ATen/ops/div_native.h>
90#include <ATen/ops/empty_like_native.h>
91#include <ATen/ops/empty_native.h>
92#include <ATen/ops/erf_native.h>
93#include <ATen/ops/erfinv_native.h>
94#include <ATen/ops/expm1_native.h>
95#include <ATen/ops/floor_divide_native.h>
96#include <ATen/ops/floor_native.h>
97#include <ATen/ops/frac_native.h>
98#include <ATen/ops/hspmm_native.h>
99#include <ATen/ops/index_select_native.h>
100#include <ATen/ops/indices_native.h>
101#include <ATen/ops/is_coalesced_native.h>
102#include <ATen/ops/isinf_native.h>
103#include <ATen/ops/isnan_native.h>
104#include <ATen/ops/isneginf_native.h>
105#include <ATen/ops/isposinf_native.h>
106#include <ATen/ops/log1p_native.h>
107#include <ATen/ops/mm_native.h>
108#include <ATen/ops/mul_native.h>
109#include <ATen/ops/mv_native.h>
110#include <ATen/ops/nan_to_num_native.h>
111#include <ATen/ops/narrow_copy_native.h>
112#include <ATen/ops/native_norm_native.h>
113#include <ATen/ops/neg_native.h>
114#include <ATen/ops/norm_native.h>
115#include <ATen/ops/permute_native.h>
116#include <ATen/ops/pow_native.h>
117#include <ATen/ops/rad2deg_native.h>
118#include <ATen/ops/relu_native.h>
119#include <ATen/ops/resize_as_sparse_native.h>
120#include <ATen/ops/round_native.h>
121#include <ATen/ops/sgn_native.h>
122#include <ATen/ops/sign_native.h>
123#include <ATen/ops/signbit_native.h>
124#include <ATen/ops/sin_native.h>
125#include <ATen/ops/sinh_native.h>
126#include <ATen/ops/sparse_dim_native.h>
127#include <ATen/ops/sparse_mask_native.h>
128#include <ATen/ops/sparse_resize_and_clear_native.h>
129#include <ATen/ops/sparse_resize_native.h>
130#include <ATen/ops/sqrt_native.h>
131#include <ATen/ops/sspaddmm_native.h>
132#include <ATen/ops/sub_native.h>
133#include <ATen/ops/sum_native.h>
134#include <ATen/ops/tan_native.h>
135#include <ATen/ops/tanh_native.h>
136#include <ATen/ops/threshold_backward_native.h>
137#include <ATen/ops/to_sparse_bsc_native.h>
138#include <ATen/ops/to_sparse_bsr_native.h>
139#include <ATen/ops/to_sparse_csc_native.h>
140#include <ATen/ops/to_sparse_csr_native.h>
141#include <ATen/ops/to_sparse_native.h>
142#include <ATen/ops/trunc_native.h>
143#include <ATen/ops/unsqueeze_native.h>
144#include <ATen/ops/values_native.h>
145#include <ATen/ops/zero_native.h>
146#include <ATen/ops/zeros_native.h>
147
148// See template file RegisterDispatchDefinitions.ini
149namespace at {
150// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
151// ambiguity with conflicting identifiers that may have been defined in
152// at namespace already.
153namespace {
154void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
155 TORCH_CHECK(options.dtype() == out.dtype(),
156 "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
157 TORCH_CHECK(options.device() == out.device(),
158 "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
159 const bool resized = at::native::resize_output(out, sizes);
160 // Only restride if a resize occurred; otherwise we ignore the (advisory)
161 // strides from the meta function and directly use the output tensor's
162 // preexisting strides
163 if (resized) {
164 if (!strides.empty()) {
165 TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
166 // TODO: avoid the redispatch here
167 out.as_strided_(sizes, strides);
168 } else if (options.memory_format_opt().has_value()) {
169 out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
170 }
171 }
172}
173void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
174 // These checks are needed on those operators that:
175 // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
176 // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
177 // For other operators (e.g. 'add'), 'TensorIterator' already checks
178 // these things separately.
179 TORCH_CHECK(options.dtype() == self.dtype(),
180 "Bad in-place call: ",
181 "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
182 TORCH_CHECK(options.device() == self.device(),
183 "Bad in-place call: ",
184 "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
185 TORCH_CHECK(sizes == self.sizes(),
186 "Bad in-place call: ",
187 "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
188}
189namespace {
190at::Tensor wrapper_SparseCPU__abs(const at::Tensor & self) {
191 // No device check
192 // DeviceGuard omitted
193 return at::native::abs_sparse(self);
194}
195} // anonymous namespace
196namespace {
197at::Tensor & wrapper_SparseCPU_out_abs_out(const at::Tensor & self, at::Tensor & out) {
198 // No device check
199 // DeviceGuard omitted
200 return at::native::abs_sparse_out(self, out);
201}
202} // anonymous namespace
203namespace {
204at::Tensor & wrapper_SparseCPU__abs_(at::Tensor & self) {
205 // No device check
206 // DeviceGuard omitted
207 return at::native::abs_sparse_(self);
208}
209} // anonymous namespace
210namespace {
211at::Tensor wrapper_SparseCPU__sgn(const at::Tensor & self) {
212 // No device check
213 // DeviceGuard omitted
214 return at::native::sgn_sparse(self);
215}
216} // anonymous namespace
217namespace {
218at::Tensor & wrapper_SparseCPU_out_sgn_out(const at::Tensor & self, at::Tensor & out) {
219 // No device check
220 // DeviceGuard omitted
221 return at::native::sgn_sparse_out(self, out);
222}
223} // anonymous namespace
224namespace {
225at::Tensor & wrapper_SparseCPU__sgn_(at::Tensor & self) {
226 // No device check
227 // DeviceGuard omitted
228 return at::native::sgn_sparse_(self);
229}
230} // anonymous namespace
231namespace {
232at::Tensor & wrapper_SparseCPU_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) {
233 // No device check
234 // DeviceGuard omitted
235 return at::native::conj_physical_out_sparse(self, out);
236}
237} // anonymous namespace
238namespace {
239at::Tensor wrapper_SparseCPU_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
240 // No device check
241 // DeviceGuard omitted
242 return at::native::add_sparse(self, other, alpha);
243}
244} // anonymous namespace
245namespace {
246at::Tensor & wrapper_SparseCPU_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
247 // No device check
248 // DeviceGuard omitted
249 return at::native::add_out_sparse_cpu(self, other, alpha, out);
250}
251} // anonymous namespace
252namespace {
253at::Tensor & wrapper_SparseCPU_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
254 // No device check
255 // DeviceGuard omitted
256 return at::native::add_sparse_(self, other, alpha);
257}
258} // anonymous namespace
259namespace {
260at::Tensor wrapper_SparseCPU__asinh(const at::Tensor & self) {
261 // No device check
262 // DeviceGuard omitted
263 return at::native::asinh_sparse(self);
264}
265} // anonymous namespace
266namespace {
267at::Tensor & wrapper_SparseCPU_out_asinh_out(const at::Tensor & self, at::Tensor & out) {
268 // No device check
269 // DeviceGuard omitted
270 return at::native::asinh_sparse_out(self, out);
271}
272} // anonymous namespace
273namespace {
274at::Tensor & wrapper_SparseCPU__asinh_(at::Tensor & self) {
275 // No device check
276 // DeviceGuard omitted
277 return at::native::asinh_sparse_(self);
278}
279} // anonymous namespace
280namespace {
281at::Tensor wrapper_SparseCPU__atanh(const at::Tensor & self) {
282 // No device check
283 // DeviceGuard omitted
284 return at::native::atanh_sparse(self);
285}
286} // anonymous namespace
287namespace {
288at::Tensor & wrapper_SparseCPU_out_atanh_out(const at::Tensor & self, at::Tensor & out) {
289 // No device check
290 // DeviceGuard omitted
291 return at::native::atanh_sparse_out(self, out);
292}
293} // anonymous namespace
294namespace {
295at::Tensor & wrapper_SparseCPU__atanh_(at::Tensor & self) {
296 // No device check
297 // DeviceGuard omitted
298 return at::native::atanh_sparse_(self);
299}
300} // anonymous namespace
301namespace {
302at::Tensor wrapper_SparseCPU__asin(const at::Tensor & self) {
303 // No device check
304 // DeviceGuard omitted
305 return at::native::asin_sparse(self);
306}
307} // anonymous namespace
308namespace {
309at::Tensor & wrapper_SparseCPU_out_asin_out(const at::Tensor & self, at::Tensor & out) {
310 // No device check
311 // DeviceGuard omitted
312 return at::native::asin_sparse_out(self, out);
313}
314} // anonymous namespace
315namespace {
316at::Tensor & wrapper_SparseCPU__asin_(at::Tensor & self) {
317 // No device check
318 // DeviceGuard omitted
319 return at::native::asin_sparse_(self);
320}
321} // anonymous namespace
322namespace {
323at::Tensor wrapper_SparseCPU__atan(const at::Tensor & self) {
324 // No device check
325 // DeviceGuard omitted
326 return at::native::atan_sparse(self);
327}
328} // anonymous namespace
329namespace {
330at::Tensor & wrapper_SparseCPU_out_atan_out(const at::Tensor & self, at::Tensor & out) {
331 // No device check
332 // DeviceGuard omitted
333 return at::native::atan_sparse_out(self, out);
334}
335} // anonymous namespace
336namespace {
337at::Tensor & wrapper_SparseCPU__atan_(at::Tensor & self) {
338 // No device check
339 // DeviceGuard omitted
340 return at::native::atan_sparse_(self);
341}
342} // anonymous namespace
343namespace {
344at::Tensor wrapper_SparseCPU__bmm(const at::Tensor & self, const at::Tensor & mat2) {
345 // No device check
346 // DeviceGuard omitted
347 return at::native::bmm_sparse_cpu(self, mat2);
348}
349} // anonymous namespace
350namespace {
351at::Tensor & wrapper_SparseCPU_out_bmm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
352 // No device check
353 // DeviceGuard omitted
354 return at::native::bmm_out_sparse_cpu(self, mat2, out);
355}
356} // anonymous namespace
357namespace {
358at::Tensor wrapper_SparseCPU___sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
359 // No device check
360 // DeviceGuard omitted
361 return at::native::sparse_broadcast_to(self, size);
362}
363} // anonymous namespace
364namespace {
365at::Tensor wrapper_SparseCPU__cat(const at::ITensorListRef & tensors, int64_t dim) {
366 // No device check
367 // DeviceGuard omitted
368 return at::native::cat_sparse(tensors, dim);
369}
370} // anonymous namespace
371namespace {
372at::Tensor wrapper_SparseCPU__ceil(const at::Tensor & self) {
373 // No device check
374 // DeviceGuard omitted
375 return at::native::ceil_sparse(self);
376}
377} // anonymous namespace
378namespace {
379at::Tensor & wrapper_SparseCPU_out_ceil_out(const at::Tensor & self, at::Tensor & out) {
380 // No device check
381 // DeviceGuard omitted
382 return at::native::ceil_sparse_out(self, out);
383}
384} // anonymous namespace
385namespace {
386at::Tensor & wrapper_SparseCPU__ceil_(at::Tensor & self) {
387 // No device check
388 // DeviceGuard omitted
389 return at::native::ceil_sparse_(self);
390}
391} // anonymous namespace
392namespace {
393at::Tensor & wrapper_SparseCPU__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
394 // No device check
395 // DeviceGuard omitted
396 return at::native::copy_sparse_wrapper_(self, src, non_blocking);
397}
398} // anonymous namespace
399namespace {
400at::Tensor wrapper_SparseCPU_Tensor_div(const at::Tensor & self, const at::Tensor & other) {
401 // No device check
402 // DeviceGuard omitted
403 return at::native::div_sparse(self, other);
404}
405} // anonymous namespace
406namespace {
407at::Tensor & wrapper_SparseCPU_out_div_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
408 // No device check
409 // DeviceGuard omitted
410 return at::native::div_out_sparse_zerodim(self, other, out);
411}
412} // anonymous namespace
413namespace {
414at::Tensor & wrapper_SparseCPU_Tensor_div_(at::Tensor & self, const at::Tensor & other) {
415 // No device check
416 // DeviceGuard omitted
417 return at::native::div_sparse_(self, other);
418}
419} // anonymous namespace
420namespace {
421at::Tensor wrapper_SparseCPU_Tensor_mode_div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
422 // No device check
423 // DeviceGuard omitted
424 return at::native::div_sparse(self, other, rounding_mode);
425}
426} // anonymous namespace
427namespace {
428at::Tensor & wrapper_SparseCPU_out_mode_div_out(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
429 // No device check
430 // DeviceGuard omitted
431 return at::native::div_out_sparse_zerodim(self, other, rounding_mode, out);
432}
433} // anonymous namespace
434namespace {
435at::Tensor & wrapper_SparseCPU_Tensor_mode_div_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
436 // No device check
437 // DeviceGuard omitted
438 return at::native::div_sparse_(self, other, rounding_mode);
439}
440} // anonymous namespace
441namespace {
442at::Tensor wrapper_SparseCPU_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
443 // No device check
444 // DeviceGuard omitted
445 return at::native::empty_sparse(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
446}
447} // anonymous namespace
448namespace {
449at::Tensor wrapper_SparseCPU__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
450 // No device check
451 // DeviceGuard omitted
452 return at::native::empty_like_sparse_coo(self, dtype, layout, device, pin_memory, memory_format);
453}
454} // anonymous namespace
455namespace {
456at::Tensor wrapper_SparseCPU__erf(const at::Tensor & self) {
457 // No device check
458 // DeviceGuard omitted
459 return at::native::erf_sparse(self);
460}
461} // anonymous namespace
462namespace {
463at::Tensor & wrapper_SparseCPU_out_erf_out(const at::Tensor & self, at::Tensor & out) {
464 // No device check
465 // DeviceGuard omitted
466 return at::native::erf_sparse_out(self, out);
467}
468} // anonymous namespace
469namespace {
470at::Tensor & wrapper_SparseCPU__erf_(at::Tensor & self) {
471 // No device check
472 // DeviceGuard omitted
473 return at::native::erf_sparse_(self);
474}
475} // anonymous namespace
476namespace {
477at::Tensor wrapper_SparseCPU__expm1(const at::Tensor & self) {
478 // No device check
479 // DeviceGuard omitted
480 return at::native::expm1_sparse(self);
481}
482} // anonymous namespace
483namespace {
484at::Tensor & wrapper_SparseCPU_out_expm1_out(const at::Tensor & self, at::Tensor & out) {
485 // No device check
486 // DeviceGuard omitted
487 return at::native::expm1_sparse_out(self, out);
488}
489} // anonymous namespace
490namespace {
491at::Tensor & wrapper_SparseCPU__expm1_(at::Tensor & self) {
492 // No device check
493 // DeviceGuard omitted
494 return at::native::expm1_sparse_(self);
495}
496} // anonymous namespace
497namespace {
498at::Tensor wrapper_SparseCPU__floor(const at::Tensor & self) {
499 // No device check
500 // DeviceGuard omitted
501 return at::native::floor_sparse(self);
502}
503} // anonymous namespace
504namespace {
505at::Tensor & wrapper_SparseCPU_out_floor_out(const at::Tensor & self, at::Tensor & out) {
506 // No device check
507 // DeviceGuard omitted
508 return at::native::floor_sparse_out(self, out);
509}
510} // anonymous namespace
511namespace {
512at::Tensor & wrapper_SparseCPU__floor_(at::Tensor & self) {
513 // No device check
514 // DeviceGuard omitted
515 return at::native::floor_sparse_(self);
516}
517} // anonymous namespace
518namespace {
519at::Tensor wrapper_SparseCPU__floor_divide(const at::Tensor & self, const at::Tensor & other) {
520 // No device check
521 // DeviceGuard omitted
522 return at::native::floor_divide_sparse(self, other);
523}
524} // anonymous namespace
525namespace {
526at::Tensor & wrapper_SparseCPU_out_floor_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
527 // No device check
528 // DeviceGuard omitted
529 return at::native::floor_divide_out_sparse_zerodim(self, other, out);
530}
531} // anonymous namespace
532namespace {
533at::Tensor & wrapper_SparseCPU_Tensor_floor_divide_(at::Tensor & self, const at::Tensor & other) {
534 // No device check
535 // DeviceGuard omitted
536 return at::native::floor_divide_sparse_(self, other);
537}
538} // anonymous namespace
539namespace {
540at::Tensor wrapper_SparseCPU__frac(const at::Tensor & self) {
541 // No device check
542 // DeviceGuard omitted
543 return at::native::frac_sparse(self);
544}
545} // anonymous namespace
546namespace {
547at::Tensor & wrapper_SparseCPU_out_frac_out(const at::Tensor & self, at::Tensor & out) {
548 // No device check
549 // DeviceGuard omitted
550 return at::native::frac_sparse_out(self, out);
551}
552} // anonymous namespace
553namespace {
554at::Tensor & wrapper_SparseCPU__frac_(at::Tensor & self) {
555 // No device check
556 // DeviceGuard omitted
557 return at::native::frac_sparse_(self);
558}
559} // anonymous namespace
560namespace {
561at::Tensor wrapper_SparseCPU__isnan(const at::Tensor & self) {
562 // No device check
563 // DeviceGuard omitted
564 return at::native::isnan_sparse(self);
565}
566} // anonymous namespace
567namespace {
568at::Tensor wrapper_SparseCPU__nan_to_num(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
569 // No device check
570 // DeviceGuard omitted
571 return at::native::nan_to_num_sparse(self, nan, posinf, neginf);
572}
573} // anonymous namespace
574namespace {
575at::Tensor & wrapper_SparseCPU_out_nan_to_num_out(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
576 // No device check
577 // DeviceGuard omitted
578 return at::native::nan_to_num_sparse_out(self, nan, posinf, neginf, out);
579}
580} // anonymous namespace
581namespace {
582at::Tensor & wrapper_SparseCPU__nan_to_num_(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
583 // No device check
584 // DeviceGuard omitted
585 return at::native::nan_to_num_sparse_(self, nan, posinf, neginf);
586}
587} // anonymous namespace
588namespace {
589at::Tensor wrapper_SparseCPU__log1p(const at::Tensor & self) {
590 // No device check
591 // DeviceGuard omitted
592 return at::native::log1p_sparse(self);
593}
594} // anonymous namespace
595namespace {
596at::Tensor & wrapper_SparseCPU_out_log1p_out(const at::Tensor & self, at::Tensor & out) {
597 // No device check
598 // DeviceGuard omitted
599 return at::native::log1p_sparse_out(self, out);
600}
601} // anonymous namespace
602namespace {
603at::Tensor & wrapper_SparseCPU__log1p_(at::Tensor & self) {
604 // No device check
605 // DeviceGuard omitted
606 return at::native::log1p_sparse_(self);
607}
608} // anonymous namespace
609namespace {
610at::Tensor wrapper_SparseCPU__mm(const at::Tensor & self, const at::Tensor & mat2) {
611 // No device check
612 // DeviceGuard omitted
613 return at::native::_sparse_mm(self, mat2);
614}
615} // anonymous namespace
616namespace {
617at::Tensor & wrapper_SparseCPU_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
618 // No device check
619 // DeviceGuard omitted
620 return at::native::_sparse_mm_out(self, mat2, out);
621}
622} // anonymous namespace
623namespace {
624at::Tensor wrapper_SparseCPU___sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) {
625 // No device check
626 // DeviceGuard omitted
627 return at::native::sparse_sparse_matmul_cpu(self, other);
628}
629} // anonymous namespace
630namespace {
631at::Tensor wrapper_SparseCPU_Tensor_mul(const at::Tensor & self, const at::Tensor & other) {
632 // No device check
633 // DeviceGuard omitted
634 return at::native::mul_sparse(self, other);
635}
636} // anonymous namespace
637namespace {
638at::Tensor & wrapper_SparseCPU_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
639 // No device check
640 // DeviceGuard omitted
641 return at::native::mul_out_sparse_cpu(self, other, out);
642}
643} // anonymous namespace
644namespace {
645at::Tensor & wrapper_SparseCPU_Tensor_mul_(at::Tensor & self, const at::Tensor & other) {
646 // No device check
647 // DeviceGuard omitted
648 return at::native::mul_sparse_(self, other);
649}
650} // anonymous namespace
651namespace {
652at::Tensor wrapper_SparseCPU__mv(const at::Tensor & self, const at::Tensor & vec) {
653 // No device check
654 // DeviceGuard omitted
655 return at::native::mv_sparse(self, vec);
656}
657} // anonymous namespace
658namespace {
659at::Tensor wrapper_SparseCPU__narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
660 // No device check
661 // DeviceGuard omitted
662 return at::native::narrow_copy_sparse(self, dim, start.expect_int(), length.expect_int());
663}
664} // anonymous namespace
665namespace {
666at::Tensor wrapper_SparseCPU__permute(const at::Tensor & self, at::IntArrayRef dims) {
667 // No device check
668 // DeviceGuard omitted
669 return at::native::permute_sparse_coo(self, dims);
670}
671} // anonymous namespace
672namespace {
673at::Tensor wrapper_SparseCPU__rad2deg(const at::Tensor & self) {
674 // No device check
675 // DeviceGuard omitted
676 return at::native::rad2deg_sparse(self);
677}
678} // anonymous namespace
679namespace {
680at::Tensor & wrapper_SparseCPU_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) {
681 // No device check
682 // DeviceGuard omitted
683 return at::native::rad2deg_sparse_out(self, out);
684}
685} // anonymous namespace
686namespace {
687at::Tensor & wrapper_SparseCPU__rad2deg_(at::Tensor & self) {
688 // No device check
689 // DeviceGuard omitted
690 return at::native::rad2deg_sparse_(self);
691}
692} // anonymous namespace
693namespace {
694at::Tensor wrapper_SparseCPU__deg2rad(const at::Tensor & self) {
695 // No device check
696 // DeviceGuard omitted
697 return at::native::deg2rad_sparse(self);
698}
699} // anonymous namespace
700namespace {
701at::Tensor & wrapper_SparseCPU_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) {
702 // No device check
703 // DeviceGuard omitted
704 return at::native::deg2rad_sparse_out(self, out);
705}
706} // anonymous namespace
707namespace {
708at::Tensor & wrapper_SparseCPU__deg2rad_(at::Tensor & self) {
709 // No device check
710 // DeviceGuard omitted
711 return at::native::deg2rad_sparse_(self);
712}
713} // anonymous namespace
714namespace {
715at::Tensor wrapper_SparseCPU__neg(const at::Tensor & self) {
716 // No device check
717 // DeviceGuard omitted
718 return at::native::neg_sparse(self);
719}
720} // anonymous namespace
721namespace {
722at::Tensor & wrapper_SparseCPU_out_neg_out(const at::Tensor & self, at::Tensor & out) {
723 // No device check
724 // DeviceGuard omitted
725 return at::native::neg_out_sparse(self, out);
726}
727} // anonymous namespace
728namespace {
729at::Tensor & wrapper_SparseCPU__neg_(at::Tensor & self) {
730 // No device check
731 // DeviceGuard omitted
732 return at::native::neg_sparse_(self);
733}
734} // anonymous namespace
735namespace {
736at::Tensor wrapper_SparseCPU__round(const at::Tensor & self) {
737 // No device check
738 // DeviceGuard omitted
739 return at::native::round_sparse(self);
740}
741} // anonymous namespace
742namespace {
743at::Tensor & wrapper_SparseCPU_out_round_out(const at::Tensor & self, at::Tensor & out) {
744 // No device check
745 // DeviceGuard omitted
746 return at::native::round_sparse_out(self, out);
747}
748} // anonymous namespace
749namespace {
750at::Tensor & wrapper_SparseCPU__round_(at::Tensor & self) {
751 // No device check
752 // DeviceGuard omitted
753 return at::native::round_sparse_(self);
754}
755} // anonymous namespace
756namespace {
757at::Tensor wrapper_SparseCPU__relu(const at::Tensor & self) {
758 // No device check
759 // DeviceGuard omitted
760 return at::native::relu_sparse(self);
761}
762} // anonymous namespace
763namespace {
764at::Tensor & wrapper_SparseCPU__relu_(at::Tensor & self) {
765 // No device check
766 // DeviceGuard omitted
767 return at::native::relu_sparse_(self);
768}
769} // anonymous namespace
770namespace {
771at::Tensor wrapper_SparseCPU__sin(const at::Tensor & self) {
772 // No device check
773 // DeviceGuard omitted
774 return at::native::sin_sparse(self);
775}
776} // anonymous namespace
777namespace {
778at::Tensor & wrapper_SparseCPU_out_sin_out(const at::Tensor & self, at::Tensor & out) {
779 // No device check
780 // DeviceGuard omitted
781 return at::native::sin_sparse_out(self, out);
782}
783} // anonymous namespace
784namespace {
785at::Tensor & wrapper_SparseCPU__sin_(at::Tensor & self) {
786 // No device check
787 // DeviceGuard omitted
788 return at::native::sin_sparse_(self);
789}
790} // anonymous namespace
791namespace {
792at::Tensor wrapper_SparseCPU__sinh(const at::Tensor & self) {
793 // No device check
794 // DeviceGuard omitted
795 return at::native::sinh_sparse(self);
796}
797} // anonymous namespace
798namespace {
799at::Tensor & wrapper_SparseCPU_out_sinh_out(const at::Tensor & self, at::Tensor & out) {
800 // No device check
801 // DeviceGuard omitted
802 return at::native::sinh_sparse_out(self, out);
803}
804} // anonymous namespace
805namespace {
806at::Tensor & wrapper_SparseCPU__sinh_(at::Tensor & self) {
807 // No device check
808 // DeviceGuard omitted
809 return at::native::sinh_sparse_(self);
810}
811} // anonymous namespace
812namespace {
813at::Tensor & wrapper_SparseCPU_out_sspaddmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
814 // No device check
815 // DeviceGuard omitted
816 return at::native::_sspaddmm_out_cpu(self, mat1, mat2, beta, alpha, out);
817}
818} // anonymous namespace
819namespace {
820at::Tensor wrapper_SparseCPU__sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
821 // No device check
822 // DeviceGuard omitted
823 return at::native::sum_coo(self, dtype);
824}
825} // anonymous namespace
826namespace {
827at::Tensor wrapper_SparseCPU_dim_IntList_sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
828 // No device check
829 // DeviceGuard omitted
830 return at::native::sum_sparse_coo(self, dim, keepdim, dtype);
831}
832} // anonymous namespace
833namespace {
834at::Tensor wrapper_SparseCPU__sqrt(const at::Tensor & self) {
835 // No device check
836 // DeviceGuard omitted
837 return at::native::sqrt_sparse(self);
838}
839} // anonymous namespace
840namespace {
841at::Tensor & wrapper_SparseCPU_out_sqrt_out(const at::Tensor & self, at::Tensor & out) {
842 // No device check
843 // DeviceGuard omitted
844 return at::native::sqrt_sparse_out(self, out);
845}
846} // anonymous namespace
847namespace {
848at::Tensor & wrapper_SparseCPU__sqrt_(at::Tensor & self) {
849 // No device check
850 // DeviceGuard omitted
851 return at::native::sqrt_sparse_(self);
852}
853} // anonymous namespace
854namespace {
855at::Tensor wrapper_SparseCPU__tan(const at::Tensor & self) {
856 // No device check
857 // DeviceGuard omitted
858 return at::native::tan_sparse(self);
859}
860} // anonymous namespace
861namespace {
862at::Tensor & wrapper_SparseCPU_out_tan_out(const at::Tensor & self, at::Tensor & out) {
863 // No device check
864 // DeviceGuard omitted
865 return at::native::tan_sparse_out(self, out);
866}
867} // anonymous namespace
868namespace {
869at::Tensor & wrapper_SparseCPU__tan_(at::Tensor & self) {
870 // No device check
871 // DeviceGuard omitted
872 return at::native::tan_sparse_(self);
873}
874} // anonymous namespace
875namespace {
876at::Tensor wrapper_SparseCPU__tanh(const at::Tensor & self) {
877 // No device check
878 // DeviceGuard omitted
879 return at::native::tanh_sparse(self);
880}
881} // anonymous namespace
882namespace {
883at::Tensor & wrapper_SparseCPU_out_tanh_out(const at::Tensor & self, at::Tensor & out) {
884 // No device check
885 // DeviceGuard omitted
886 return at::native::tanh_sparse_out(self, out);
887}
888} // anonymous namespace
889namespace {
890at::Tensor & wrapper_SparseCPU__tanh_(at::Tensor & self) {
891 // No device check
892 // DeviceGuard omitted
893 return at::native::tanh_sparse_(self);
894}
895} // anonymous namespace
896namespace {
897at::Tensor wrapper_SparseCPU__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
898 // No device check
899 // DeviceGuard omitted
900 return at::native::threshold_backward_sparse(grad_output, self, threshold);
901}
902} // anonymous namespace
903namespace {
904at::Tensor & wrapper_SparseCPU_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
905 // No device check
906 // DeviceGuard omitted
907 return at::native::threshold_backward_sparse_out(grad_output, self, threshold, grad_input);
908}
909} // anonymous namespace
910namespace {
911at::Tensor wrapper_SparseCPU__trunc(const at::Tensor & self) {
912 // No device check
913 // DeviceGuard omitted
914 return at::native::trunc_sparse(self);
915}
916} // anonymous namespace
917namespace {
918at::Tensor & wrapper_SparseCPU_out_trunc_out(const at::Tensor & self, at::Tensor & out) {
919 // No device check
920 // DeviceGuard omitted
921 return at::native::trunc_sparse_out(self, out);
922}
923} // anonymous namespace
924namespace {
925at::Tensor & wrapper_SparseCPU__trunc_(at::Tensor & self) {
926 // No device check
927 // DeviceGuard omitted
928 return at::native::trunc_sparse_(self);
929}
930} // anonymous namespace
931namespace {
932at::Tensor wrapper_SparseCPU__unsqueeze(const at::Tensor & self, int64_t dim) {
933 // No device check
934 // DeviceGuard omitted
935 return at::native::unsqueeze_sparse(self, dim);
936}
937} // anonymous namespace
938namespace {
939at::Tensor & wrapper_SparseCPU_out_zeros_out(c10::SymIntArrayRef size, at::Tensor & out) {
940 // No device check
941 // DeviceGuard omitted
942 return at::native::zeros_sparse_out(C10_AS_INTARRAYREF_SLOW(size), out);
943}
944} // anonymous namespace
945namespace {
946at::Tensor wrapper_SparseCPU__native_norm(const at::Tensor & self, const at::Scalar & p) {
947 // No device check
948 // DeviceGuard omitted
949 return at::native::norm_sparse(self, p);
950}
951} // anonymous namespace
952namespace {
953at::Tensor wrapper_SparseCPU_ScalarOpt_dim_dtype_native_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
954 // No device check
955 // DeviceGuard omitted
956 return at::native::norm_sparse(self, p, dim, keepdim, dtype);
957}
958} // anonymous namespace
959namespace {
960at::Tensor wrapper_SparseCPU___sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
961 // No device check
962 // DeviceGuard omitted
963 return at::native::_sparse_sum_backward_cpu(grad, self, dim);
964}
965} // anonymous namespace
966namespace {
967at::Tensor wrapper_SparseCPU___sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
968 // No device check
969 // DeviceGuard omitted
970 return at::native::softmax_sparse_cpu(self, dim, half_to_float);
971}
972} // anonymous namespace
973namespace {
974at::Tensor wrapper_SparseCPU___sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
975 // No device check
976 // DeviceGuard omitted
977 return at::native::softmax_backward_sparse_cpu(grad_output, output, dim, self);
978}
979} // anonymous namespace
980namespace {
981at::Tensor wrapper_SparseCPU___sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
982 // No device check
983 // DeviceGuard omitted
984 return at::native::log_softmax_sparse_cpu(self, dim, half_to_float);
985}
986} // anonymous namespace
987namespace {
988at::Tensor wrapper_SparseCPU___sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
989 // No device check
990 // DeviceGuard omitted
991 return at::native::log_softmax_backward_sparse_cpu(grad_output, output, dim, self);
992}
993} // anonymous namespace
994namespace {
995at::Tensor wrapper_SparseCPU_ScalarOpt_dim_dtype_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
996 // No device check
997 // DeviceGuard omitted
998 return at::native::sparse_dtype_norm(self, p, dim, keepdim, dtype);
999}
1000} // anonymous namespace
1001namespace {
1002at::Tensor wrapper_SparseCPU_ScalarOpt_dim_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
1003 // No device check
1004 // DeviceGuard omitted
1005 return at::native::sparse_norm(self, p, dim, keepdim);
1006}
1007} // anonymous namespace
1008namespace {
1009at::Tensor wrapper_SparseCPU__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
1010 // No device check
1011 // DeviceGuard omitted
1012 return at::native::clone_sparse(self, memory_format);
1013}
1014} // anonymous namespace
1015namespace {
1016const at::Tensor & wrapper_SparseCPU__resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
1017 // No device check
1018 // DeviceGuard omitted
1019 return at::native::resize_as_sparse_(self, the_template);
1020}
1021} // anonymous namespace
1022namespace {
1023at::Tensor & wrapper_SparseCPU__zero_(at::Tensor & self) {
1024 // No device check
1025 // DeviceGuard omitted
1026 return at::native::zero_sparse_(self);
1027}
1028} // anonymous namespace
1029namespace {
1030at::Tensor wrapper_SparseCPU_Tensor_sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1031 // No device check
1032 // DeviceGuard omitted
1033 return at::native::sub_sparse(self, other, alpha);
1034}
1035} // anonymous namespace
1036namespace {
1037at::Tensor & wrapper_SparseCPU_out_sub_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
1038 // No device check
1039 // DeviceGuard omitted
1040 return at::native::sub_out_sparse(self, other, alpha, out);
1041}
1042} // anonymous namespace
1043namespace {
1044at::Tensor & wrapper_SparseCPU_Tensor_sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1045 // No device check
1046 // DeviceGuard omitted
1047 return at::native::sub_sparse_(self, other, alpha);
1048}
1049} // anonymous namespace
1050namespace {
1051at::Tensor wrapper_SparseCPU__addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
1052 // No device check
1053 // DeviceGuard omitted
1054 return at::native::addmm_sparse_dense_cpu(self, mat1, mat2, beta, alpha);
1055}
1056} // anonymous namespace
1057namespace {
1058at::Tensor & wrapper_SparseCPU_out_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
1059 // No device check
1060 // DeviceGuard omitted
1061 return at::native::addmm_out_sparse_dense_cpu(self, mat1, mat2, beta, alpha, out);
1062}
1063} // anonymous namespace
1064namespace {
1065at::Tensor & wrapper_SparseCPU__addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
1066 // No device check
1067 // DeviceGuard omitted
1068 return at::native::s_addmm_sparse_dense_cpu_(self, mat1, mat2, beta, alpha);
1069}
1070} // anonymous namespace
1071namespace {
1072at::Tensor wrapper_SparseCPU___sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
1073 // No device check
1074 // DeviceGuard omitted
1075 return at::native::new_with_dims_sparse(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
1076}
1077} // anonymous namespace
1078namespace {
1079at::Tensor wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
1080 // No device check
1081 // DeviceGuard omitted
1082 return at::native::new_with_dims_and_tensor_sparse_symint(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
1083}
1084} // anonymous namespace
1085namespace {
1086const at::Tensor & wrapper_SparseCPU__sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
1087 // No device check
1088 // DeviceGuard omitted
1089 return at::native::sparse_resize_(self, size, sparse_dim, dense_dim);
1090}
1091} // anonymous namespace
1092namespace {
1093const at::Tensor & wrapper_SparseCPU__sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
1094 // No device check
1095 // DeviceGuard omitted
1096 return at::native::sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
1097}
1098} // anonymous namespace
1099namespace {
1100at::Tensor wrapper_SparseCPU__sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
1101 // No device check
1102 // DeviceGuard omitted
1103 return at::native::sparse_mask(self, mask);
1104}
1105} // anonymous namespace
1106namespace {
1107at::Tensor wrapper_SparseCPU___to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
1108 // No device check
1109 // DeviceGuard omitted
1110 return at::native::sparse_to_dense(self, dtype);
1111}
1112} // anonymous namespace
1113namespace {
1114int64_t wrapper_SparseCPU__sparse_dim(const at::Tensor & self) {
1115 // No device check
1116 // DeviceGuard omitted
1117 return at::native::sparse_dim_sparse(self);
1118}
1119} // anonymous namespace
1120namespace {
1121int64_t wrapper_SparseCPU___dimI(const at::Tensor & self) {
1122 // No device check
1123 // DeviceGuard omitted
1124 return at::native::sparse_dim_sparse(self);
1125}
1126} // anonymous namespace
1127namespace {
1128int64_t wrapper_SparseCPU__dense_dim(const at::Tensor & self) {
1129 // No device check
1130 // DeviceGuard omitted
1131 return at::native::dense_dim_sparse(self);
1132}
1133} // anonymous namespace
1134namespace {
1135int64_t wrapper_SparseCPU___dimV(const at::Tensor & self) {
1136 // No device check
1137 // DeviceGuard omitted
1138 return at::native::dense_dim_sparse(self);
1139}
1140} // anonymous namespace
1141namespace {
1142int64_t wrapper_SparseCPU___nnz(const at::Tensor & self) {
1143 // No device check
1144 // DeviceGuard omitted
1145 return at::native::_nnz_sparse(self);
1146}
1147} // anonymous namespace
1148namespace {
1149at::Tensor wrapper_SparseCPU___coalesce(const at::Tensor & self) {
1150 // No device check
1151 // DeviceGuard omitted
1152 return at::native::_coalesce_sparse_cpu(self);
1153}
1154} // anonymous namespace
1155namespace {
1156bool wrapper_SparseCPU__is_coalesced(const at::Tensor & self) {
1157 // No device check
1158 // DeviceGuard omitted
1159 return at::native::is_coalesced_sparse(self);
1160}
1161} // anonymous namespace
1162namespace {
1163at::Tensor wrapper_SparseCPU___indices(const at::Tensor & self) {
1164 // No device check
1165 // DeviceGuard omitted
1166 return at::native::_indices_sparse(self);
1167}
1168} // anonymous namespace
1169namespace {
1170at::Tensor wrapper_SparseCPU___values(const at::Tensor & self) {
1171 // No device check
1172 // DeviceGuard omitted
1173 return at::native::_values_sparse(self);
1174}
1175} // anonymous namespace
1176namespace {
1177at::Tensor & wrapper_SparseCPU___coalesced_(at::Tensor & self, bool coalesced) {
1178 // No device check
1179 // DeviceGuard omitted
1180 return at::native::_coalesced_sparse_(self, coalesced);
1181}
1182} // anonymous namespace
1183namespace {
1184at::Tensor wrapper_SparseCPU__indices(const at::Tensor & self) {
1185 // No device check
1186 // DeviceGuard omitted
1187 return at::native::indices_sparse(self);
1188}
1189} // anonymous namespace
1190namespace {
1191at::Tensor wrapper_SparseCPU__values(const at::Tensor & self) {
1192 // No device check
1193 // DeviceGuard omitted
1194 return at::native::values_sparse(self);
1195}
1196} // anonymous namespace
1197namespace {
1198at::Tensor wrapper_SparseCPU__hspmm(const at::Tensor & mat1, const at::Tensor & mat2) {
1199 // No device check
1200 // DeviceGuard omitted
1201 return at::native::hspmm_sparse_cpu(mat1, mat2);
1202}
1203} // anonymous namespace
1204namespace {
1205at::Tensor & wrapper_SparseCPU_out_hspmm_out(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
1206 // No device check
1207 // DeviceGuard omitted
1208 return at::native::hspmm_out_sparse_cpu(mat1, mat2, out);
1209}
1210} // anonymous namespace
1211namespace {
1212at::Tensor & wrapper_SparseCPU__copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
1213 // No device check
1214 // DeviceGuard omitted
1215 return at::native::copy_sparse_(self, src, non_blocking);
1216}
1217} // anonymous namespace
1218namespace {
1219at::Tensor wrapper_SparseCPU_sparse_dim_to_sparse(const at::Tensor & self, int64_t sparse_dim) {
1220 // No device check
1221 // DeviceGuard omitted
1222 return at::native::sparse_coo_to_sparse(self, sparse_dim);
1223}
1224} // anonymous namespace
1225namespace {
1226at::Tensor wrapper_SparseCPU__to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1227 // No device check
1228 // DeviceGuard omitted
1229 return at::native::sparse_coo_to_sparse(self, layout, blocksize, dense_dim);
1230}
1231} // anonymous namespace
1232namespace {
1233at::Tensor wrapper_SparseCPU__to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
1234 // No device check
1235 // DeviceGuard omitted
1236 return at::native::coo_to_sparse_csr(self, dense_dim);
1237}
1238} // anonymous namespace
1239namespace {
1240at::Tensor wrapper_SparseCPU__to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
1241 // No device check
1242 // DeviceGuard omitted
1243 return at::native::coo_to_sparse_csc(self, dense_dim);
1244}
1245} // anonymous namespace
1246namespace {
1247at::Tensor wrapper_SparseCPU__to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1248 // No device check
1249 // DeviceGuard omitted
1250 return at::native::coo_to_sparse_bsr(self, blocksize, dense_dim);
1251}
1252} // anonymous namespace
1253namespace {
1254at::Tensor wrapper_SparseCPU__to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
1255 // No device check
1256 // DeviceGuard omitted
1257 return at::native::coo_to_sparse_bsc(self, blocksize, dense_dim);
1258}
1259} // anonymous namespace
1260namespace {
1261at::Tensor wrapper_SparseCPU__index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
1262 // No device check
1263 // DeviceGuard omitted
1264 return at::native::index_select_sparse_cpu(self, dim, index);
1265}
1266} // anonymous namespace
1267namespace {
1268at::Tensor wrapper_SparseCPU__erfinv(const at::Tensor & self) {
1269 // No device check
1270 // DeviceGuard omitted
1271 return at::native::erfinv_sparse(self);
1272}
1273} // anonymous namespace
1274namespace {
1275at::Tensor & wrapper_SparseCPU_out_erfinv_out(const at::Tensor & self, at::Tensor & out) {
1276 // No device check
1277 // DeviceGuard omitted
1278 return at::native::erfinv_sparse_out(self, out);
1279}
1280} // anonymous namespace
1281namespace {
1282at::Tensor & wrapper_SparseCPU__erfinv_(at::Tensor & self) {
1283 // No device check
1284 // DeviceGuard omitted
1285 return at::native::erfinv_sparse_(self);
1286}
1287} // anonymous namespace
1288namespace {
1289at::Tensor wrapper_SparseCPU__sign(const at::Tensor & self) {
1290 // No device check
1291 // DeviceGuard omitted
1292 return at::native::sign_sparse(self);
1293}
1294} // anonymous namespace
1295namespace {
1296at::Tensor & wrapper_SparseCPU_out_sign_out(const at::Tensor & self, at::Tensor & out) {
1297 // No device check
1298 // DeviceGuard omitted
1299 return at::native::sign_sparse_out(self, out);
1300}
1301} // anonymous namespace
1302namespace {
1303at::Tensor & wrapper_SparseCPU__sign_(at::Tensor & self) {
1304 // No device check
1305 // DeviceGuard omitted
1306 return at::native::sign_sparse_(self);
1307}
1308} // anonymous namespace
1309namespace {
1310at::Tensor wrapper_SparseCPU__signbit(const at::Tensor & self) {
1311 // No device check
1312 // DeviceGuard omitted
1313 return at::native::signbit_sparse(self);
1314}
1315} // anonymous namespace
1316namespace {
1317at::Tensor & wrapper_SparseCPU_out_signbit_out(const at::Tensor & self, at::Tensor & out) {
1318 // No device check
1319 // DeviceGuard omitted
1320 return at::native::signbit_sparse_out(self, out);
1321}
1322} // anonymous namespace
1323namespace {
1324at::Tensor wrapper_SparseCPU__any(const at::Tensor & self) {
1325 // No device check
1326 // DeviceGuard omitted
1327 return at::native::any_sparse(self);
1328}
1329} // anonymous namespace
1330namespace {
1331at::Tensor wrapper_SparseCPU_Tensor_Scalar_pow(const at::Tensor & self, const at::Scalar & exponent) {
1332 // No device check
1333 // DeviceGuard omitted
1334 return at::native::pow_sparse_scalar(self, exponent);
1335}
1336} // anonymous namespace
1337namespace {
1338at::Tensor & wrapper_SparseCPU_Tensor_Scalar_out_pow_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
1339 // No device check
1340 // DeviceGuard omitted
1341 return at::native::pow_out_sparse_scalar(self, exponent, out);
1342}
1343} // anonymous namespace
1344namespace {
1345at::Tensor wrapper_SparseCPU__isinf(const at::Tensor & self) {
1346 // No device check
1347 // DeviceGuard omitted
1348 return at::native::isinf_sparse(self);
1349}
1350} // anonymous namespace
1351namespace {
1352at::Tensor wrapper_SparseCPU__isposinf(const at::Tensor & self) {
1353 // No device check
1354 // DeviceGuard omitted
1355 return at::native::isposinf_sparse(self);
1356}
1357} // anonymous namespace
1358namespace {
1359at::Tensor & wrapper_SparseCPU_out_isposinf_out(const at::Tensor & self, at::Tensor & out) {
1360 // No device check
1361 // DeviceGuard omitted
1362 return at::native::isposinf_sparse_out(self, out);
1363}
1364} // anonymous namespace
1365namespace {
1366at::Tensor wrapper_SparseCPU__isneginf(const at::Tensor & self) {
1367 // No device check
1368 // DeviceGuard omitted
1369 return at::native::isneginf_sparse(self);
1370}
1371} // anonymous namespace
1372namespace {
1373at::Tensor & wrapper_SparseCPU_out_isneginf_out(const at::Tensor & self, at::Tensor & out) {
1374 // No device check
1375 // DeviceGuard omitted
1376 return at::native::isneginf_sparse_out(self, out);
1377}
1378} // anonymous namespace
1379TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
1380 m.impl("abs",
1381TORCH_FN(wrapper_SparseCPU__abs));
1382m.impl("abs.out",
1383TORCH_FN(wrapper_SparseCPU_out_abs_out));
1384m.impl("abs_",
1385TORCH_FN(wrapper_SparseCPU__abs_));
1386m.impl("sgn",
1387TORCH_FN(wrapper_SparseCPU__sgn));
1388m.impl("sgn.out",
1389TORCH_FN(wrapper_SparseCPU_out_sgn_out));
1390m.impl("sgn_",
1391TORCH_FN(wrapper_SparseCPU__sgn_));
1392m.impl("conj_physical.out",
1393TORCH_FN(wrapper_SparseCPU_out_conj_physical_out));
1394m.impl("add.Tensor",
1395TORCH_FN(wrapper_SparseCPU_Tensor_add));
1396m.impl("add.out",
1397TORCH_FN(wrapper_SparseCPU_out_add_out));
1398m.impl("add_.Tensor",
1399TORCH_FN(wrapper_SparseCPU_Tensor_add_));
1400m.impl("asinh",
1401TORCH_FN(wrapper_SparseCPU__asinh));
1402m.impl("asinh.out",
1403TORCH_FN(wrapper_SparseCPU_out_asinh_out));
1404m.impl("asinh_",
1405TORCH_FN(wrapper_SparseCPU__asinh_));
1406m.impl("atanh",
1407TORCH_FN(wrapper_SparseCPU__atanh));
1408m.impl("atanh.out",
1409TORCH_FN(wrapper_SparseCPU_out_atanh_out));
1410m.impl("atanh_",
1411TORCH_FN(wrapper_SparseCPU__atanh_));
1412m.impl("asin",
1413TORCH_FN(wrapper_SparseCPU__asin));
1414m.impl("asin.out",
1415TORCH_FN(wrapper_SparseCPU_out_asin_out));
1416m.impl("asin_",
1417TORCH_FN(wrapper_SparseCPU__asin_));
1418m.impl("atan",
1419TORCH_FN(wrapper_SparseCPU__atan));
1420m.impl("atan.out",
1421TORCH_FN(wrapper_SparseCPU_out_atan_out));
1422m.impl("atan_",
1423TORCH_FN(wrapper_SparseCPU__atan_));
1424m.impl("bmm",
1425TORCH_FN(wrapper_SparseCPU__bmm));
1426m.impl("bmm.out",
1427TORCH_FN(wrapper_SparseCPU_out_bmm_out));
1428m.impl("_sparse_broadcast_to",
1429TORCH_FN(wrapper_SparseCPU___sparse_broadcast_to));
1430m.impl("cat",
1431TORCH_FN(wrapper_SparseCPU__cat));
1432m.impl("ceil",
1433TORCH_FN(wrapper_SparseCPU__ceil));
1434m.impl("ceil.out",
1435TORCH_FN(wrapper_SparseCPU_out_ceil_out));
1436m.impl("ceil_",
1437TORCH_FN(wrapper_SparseCPU__ceil_));
1438m.impl("copy_",
1439TORCH_FN(wrapper_SparseCPU__copy_));
1440m.impl("div.Tensor",
1441TORCH_FN(wrapper_SparseCPU_Tensor_div));
1442m.impl("div.out",
1443TORCH_FN(wrapper_SparseCPU_out_div_out));
1444m.impl("div_.Tensor",
1445TORCH_FN(wrapper_SparseCPU_Tensor_div_));
1446m.impl("div.Tensor_mode",
1447TORCH_FN(wrapper_SparseCPU_Tensor_mode_div));
1448m.impl("div.out_mode",
1449TORCH_FN(wrapper_SparseCPU_out_mode_div_out));
1450m.impl("div_.Tensor_mode",
1451TORCH_FN(wrapper_SparseCPU_Tensor_mode_div_));
1452m.impl("empty.memory_format",
1453TORCH_FN(wrapper_SparseCPU_memory_format_empty));
1454m.impl("empty_like",
1455TORCH_FN(wrapper_SparseCPU__empty_like));
1456m.impl("erf",
1457TORCH_FN(wrapper_SparseCPU__erf));
1458m.impl("erf.out",
1459TORCH_FN(wrapper_SparseCPU_out_erf_out));
1460m.impl("erf_",
1461TORCH_FN(wrapper_SparseCPU__erf_));
1462m.impl("expm1",
1463TORCH_FN(wrapper_SparseCPU__expm1));
1464m.impl("expm1.out",
1465TORCH_FN(wrapper_SparseCPU_out_expm1_out));
1466m.impl("expm1_",
1467TORCH_FN(wrapper_SparseCPU__expm1_));
1468m.impl("floor",
1469TORCH_FN(wrapper_SparseCPU__floor));
1470m.impl("floor.out",
1471TORCH_FN(wrapper_SparseCPU_out_floor_out));
1472m.impl("floor_",
1473TORCH_FN(wrapper_SparseCPU__floor_));
1474m.impl("floor_divide",
1475TORCH_FN(wrapper_SparseCPU__floor_divide));
1476m.impl("floor_divide.out",
1477TORCH_FN(wrapper_SparseCPU_out_floor_divide_out));
1478m.impl("floor_divide_.Tensor",
1479TORCH_FN(wrapper_SparseCPU_Tensor_floor_divide_));
1480m.impl("frac",
1481TORCH_FN(wrapper_SparseCPU__frac));
1482m.impl("frac.out",
1483TORCH_FN(wrapper_SparseCPU_out_frac_out));
1484m.impl("frac_",
1485TORCH_FN(wrapper_SparseCPU__frac_));
1486m.impl("isnan",
1487TORCH_FN(wrapper_SparseCPU__isnan));
1488m.impl("nan_to_num",
1489TORCH_FN(wrapper_SparseCPU__nan_to_num));
1490m.impl("nan_to_num.out",
1491TORCH_FN(wrapper_SparseCPU_out_nan_to_num_out));
1492m.impl("nan_to_num_",
1493TORCH_FN(wrapper_SparseCPU__nan_to_num_));
1494m.impl("log1p",
1495TORCH_FN(wrapper_SparseCPU__log1p));
1496m.impl("log1p.out",
1497TORCH_FN(wrapper_SparseCPU_out_log1p_out));
1498m.impl("log1p_",
1499TORCH_FN(wrapper_SparseCPU__log1p_));
1500m.impl("mm",
1501TORCH_FN(wrapper_SparseCPU__mm));
1502m.impl("mm.out",
1503TORCH_FN(wrapper_SparseCPU_out_mm_out));
1504m.impl("_sparse_sparse_matmul",
1505TORCH_FN(wrapper_SparseCPU___sparse_sparse_matmul));
1506m.impl("mul.Tensor",
1507TORCH_FN(wrapper_SparseCPU_Tensor_mul));
1508m.impl("mul.out",
1509TORCH_FN(wrapper_SparseCPU_out_mul_out));
1510m.impl("mul_.Tensor",
1511TORCH_FN(wrapper_SparseCPU_Tensor_mul_));
1512m.impl("mv",
1513TORCH_FN(wrapper_SparseCPU__mv));
1514m.impl("narrow_copy",
1515TORCH_FN(wrapper_SparseCPU__narrow_copy));
1516m.impl("permute",
1517TORCH_FN(wrapper_SparseCPU__permute));
1518m.impl("rad2deg",
1519TORCH_FN(wrapper_SparseCPU__rad2deg));
1520m.impl("rad2deg.out",
1521TORCH_FN(wrapper_SparseCPU_out_rad2deg_out));
1522m.impl("rad2deg_",
1523TORCH_FN(wrapper_SparseCPU__rad2deg_));
1524m.impl("deg2rad",
1525TORCH_FN(wrapper_SparseCPU__deg2rad));
1526m.impl("deg2rad.out",
1527TORCH_FN(wrapper_SparseCPU_out_deg2rad_out));
1528m.impl("deg2rad_",
1529TORCH_FN(wrapper_SparseCPU__deg2rad_));
1530m.impl("neg",
1531TORCH_FN(wrapper_SparseCPU__neg));
1532m.impl("neg.out",
1533TORCH_FN(wrapper_SparseCPU_out_neg_out));
1534m.impl("neg_",
1535TORCH_FN(wrapper_SparseCPU__neg_));
1536m.impl("round",
1537TORCH_FN(wrapper_SparseCPU__round));
1538m.impl("round.out",
1539TORCH_FN(wrapper_SparseCPU_out_round_out));
1540m.impl("round_",
1541TORCH_FN(wrapper_SparseCPU__round_));
1542m.impl("relu",
1543TORCH_FN(wrapper_SparseCPU__relu));
1544m.impl("relu_",
1545TORCH_FN(wrapper_SparseCPU__relu_));
1546m.impl("sin",
1547TORCH_FN(wrapper_SparseCPU__sin));
1548m.impl("sin.out",
1549TORCH_FN(wrapper_SparseCPU_out_sin_out));
1550m.impl("sin_",
1551TORCH_FN(wrapper_SparseCPU__sin_));
1552m.impl("sinh",
1553TORCH_FN(wrapper_SparseCPU__sinh));
1554m.impl("sinh.out",
1555TORCH_FN(wrapper_SparseCPU_out_sinh_out));
1556m.impl("sinh_",
1557TORCH_FN(wrapper_SparseCPU__sinh_));
1558m.impl("sspaddmm.out",
1559TORCH_FN(wrapper_SparseCPU_out_sspaddmm_out));
1560m.impl("sum",
1561TORCH_FN(wrapper_SparseCPU__sum));
1562m.impl("sum.dim_IntList",
1563TORCH_FN(wrapper_SparseCPU_dim_IntList_sum));
1564m.impl("sqrt",
1565TORCH_FN(wrapper_SparseCPU__sqrt));
1566m.impl("sqrt.out",
1567TORCH_FN(wrapper_SparseCPU_out_sqrt_out));
1568m.impl("sqrt_",
1569TORCH_FN(wrapper_SparseCPU__sqrt_));
1570m.impl("tan",
1571TORCH_FN(wrapper_SparseCPU__tan));
1572m.impl("tan.out",
1573TORCH_FN(wrapper_SparseCPU_out_tan_out));
1574m.impl("tan_",
1575TORCH_FN(wrapper_SparseCPU__tan_));
1576m.impl("tanh",
1577TORCH_FN(wrapper_SparseCPU__tanh));
1578m.impl("tanh.out",
1579TORCH_FN(wrapper_SparseCPU_out_tanh_out));
1580m.impl("tanh_",
1581TORCH_FN(wrapper_SparseCPU__tanh_));
1582m.impl("threshold_backward",
1583TORCH_FN(wrapper_SparseCPU__threshold_backward));
1584m.impl("threshold_backward.grad_input",
1585TORCH_FN(wrapper_SparseCPU_grad_input_threshold_backward_out));
1586m.impl("trunc",
1587TORCH_FN(wrapper_SparseCPU__trunc));
1588m.impl("trunc.out",
1589TORCH_FN(wrapper_SparseCPU_out_trunc_out));
1590m.impl("trunc_",
1591TORCH_FN(wrapper_SparseCPU__trunc_));
1592m.impl("unsqueeze",
1593TORCH_FN(wrapper_SparseCPU__unsqueeze));
1594m.impl("zeros.out",
1595TORCH_FN(wrapper_SparseCPU_out_zeros_out));
1596m.impl("native_norm",
1597TORCH_FN(wrapper_SparseCPU__native_norm));
1598m.impl("native_norm.ScalarOpt_dim_dtype",
1599TORCH_FN(wrapper_SparseCPU_ScalarOpt_dim_dtype_native_norm));
1600m.impl("_sparse_sum_backward",
1601TORCH_FN(wrapper_SparseCPU___sparse_sum_backward));
1602m.impl("_sparse_softmax",
1603TORCH_FN(wrapper_SparseCPU___sparse_softmax));
1604m.impl("_sparse_softmax_backward_data",
1605TORCH_FN(wrapper_SparseCPU___sparse_softmax_backward_data));
1606m.impl("_sparse_log_softmax",
1607TORCH_FN(wrapper_SparseCPU___sparse_log_softmax));
1608m.impl("_sparse_log_softmax_backward_data",
1609TORCH_FN(wrapper_SparseCPU___sparse_log_softmax_backward_data));
1610m.impl("norm.ScalarOpt_dim_dtype",
1611TORCH_FN(wrapper_SparseCPU_ScalarOpt_dim_dtype_norm));
1612m.impl("norm.ScalarOpt_dim",
1613TORCH_FN(wrapper_SparseCPU_ScalarOpt_dim_norm));
1614m.impl("clone",
1615TORCH_FN(wrapper_SparseCPU__clone));
1616m.impl("resize_as_sparse_",
1617TORCH_FN(wrapper_SparseCPU__resize_as_sparse_));
1618m.impl("zero_",
1619TORCH_FN(wrapper_SparseCPU__zero_));
1620m.impl("sub.Tensor",
1621TORCH_FN(wrapper_SparseCPU_Tensor_sub));
1622m.impl("sub.out",
1623TORCH_FN(wrapper_SparseCPU_out_sub_out));
1624m.impl("sub_.Tensor",
1625TORCH_FN(wrapper_SparseCPU_Tensor_sub_));
1626m.impl("addmm",
1627TORCH_FN(wrapper_SparseCPU__addmm));
1628m.impl("addmm.out",
1629TORCH_FN(wrapper_SparseCPU_out_addmm_out));
1630m.impl("addmm_",
1631TORCH_FN(wrapper_SparseCPU__addmm_));
1632m.impl("_sparse_coo_tensor_with_dims",
1633TORCH_FN(wrapper_SparseCPU___sparse_coo_tensor_with_dims));
1634m.impl("_sparse_coo_tensor_with_dims_and_tensors",
1635TORCH_FN(wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors));
1636m.impl("sparse_resize_",
1637TORCH_FN(wrapper_SparseCPU__sparse_resize_));
1638m.impl("sparse_resize_and_clear_",
1639TORCH_FN(wrapper_SparseCPU__sparse_resize_and_clear_));
1640m.impl("sparse_mask",
1641TORCH_FN(wrapper_SparseCPU__sparse_mask));
1642m.impl("_to_dense",
1643TORCH_FN(wrapper_SparseCPU___to_dense));
1644m.impl("sparse_dim",
1645TORCH_FN(wrapper_SparseCPU__sparse_dim));
1646m.impl("_dimI",
1647TORCH_FN(wrapper_SparseCPU___dimI));
1648m.impl("dense_dim",
1649TORCH_FN(wrapper_SparseCPU__dense_dim));
1650m.impl("_dimV",
1651TORCH_FN(wrapper_SparseCPU___dimV));
1652m.impl("_nnz",
1653TORCH_FN(wrapper_SparseCPU___nnz));
1654m.impl("_coalesce",
1655TORCH_FN(wrapper_SparseCPU___coalesce));
1656m.impl("is_coalesced",
1657TORCH_FN(wrapper_SparseCPU__is_coalesced));
1658m.impl("_indices",
1659TORCH_FN(wrapper_SparseCPU___indices));
1660m.impl("_values",
1661TORCH_FN(wrapper_SparseCPU___values));
1662m.impl("_coalesced_",
1663TORCH_FN(wrapper_SparseCPU___coalesced_));
1664m.impl("indices",
1665TORCH_FN(wrapper_SparseCPU__indices));
1666m.impl("values",
1667TORCH_FN(wrapper_SparseCPU__values));
1668m.impl("hspmm",
1669TORCH_FN(wrapper_SparseCPU__hspmm));
1670m.impl("hspmm.out",
1671TORCH_FN(wrapper_SparseCPU_out_hspmm_out));
1672m.impl("copy_sparse_to_sparse_",
1673TORCH_FN(wrapper_SparseCPU__copy_sparse_to_sparse_));
1674m.impl("to_sparse.sparse_dim",
1675TORCH_FN(wrapper_SparseCPU_sparse_dim_to_sparse));
1676m.impl("to_sparse",
1677TORCH_FN(wrapper_SparseCPU__to_sparse));
1678m.impl("to_sparse_csr",
1679TORCH_FN(wrapper_SparseCPU__to_sparse_csr));
1680m.impl("to_sparse_csc",
1681TORCH_FN(wrapper_SparseCPU__to_sparse_csc));
1682m.impl("to_sparse_bsr",
1683TORCH_FN(wrapper_SparseCPU__to_sparse_bsr));
1684m.impl("to_sparse_bsc",
1685TORCH_FN(wrapper_SparseCPU__to_sparse_bsc));
1686m.impl("index_select",
1687TORCH_FN(wrapper_SparseCPU__index_select));
1688m.impl("erfinv",
1689TORCH_FN(wrapper_SparseCPU__erfinv));
1690m.impl("erfinv.out",
1691TORCH_FN(wrapper_SparseCPU_out_erfinv_out));
1692m.impl("erfinv_",
1693TORCH_FN(wrapper_SparseCPU__erfinv_));
1694m.impl("sign",
1695TORCH_FN(wrapper_SparseCPU__sign));
1696m.impl("sign.out",
1697TORCH_FN(wrapper_SparseCPU_out_sign_out));
1698m.impl("sign_",
1699TORCH_FN(wrapper_SparseCPU__sign_));
1700m.impl("signbit",
1701TORCH_FN(wrapper_SparseCPU__signbit));
1702m.impl("signbit.out",
1703TORCH_FN(wrapper_SparseCPU_out_signbit_out));
1704m.impl("any",
1705TORCH_FN(wrapper_SparseCPU__any));
1706m.impl("pow.Tensor_Scalar",
1707TORCH_FN(wrapper_SparseCPU_Tensor_Scalar_pow));
1708m.impl("pow.Tensor_Scalar_out",
1709TORCH_FN(wrapper_SparseCPU_Tensor_Scalar_out_pow_out));
1710m.impl("isinf",
1711TORCH_FN(wrapper_SparseCPU__isinf));
1712m.impl("isposinf",
1713TORCH_FN(wrapper_SparseCPU__isposinf));
1714m.impl("isposinf.out",
1715TORCH_FN(wrapper_SparseCPU_out_isposinf_out));
1716m.impl("isneginf",
1717TORCH_FN(wrapper_SparseCPU__isneginf));
1718m.impl("isneginf.out",
1719TORCH_FN(wrapper_SparseCPU_out_isneginf_out));
1720};
1721} // anonymous namespace
1722namespace sparsecpu {
1723at::Tensor abs(const at::Tensor & self) {
1724return wrapper_SparseCPU__abs(self);
1725}
1726at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
1727return wrapper_SparseCPU_out_abs_out(self, out);
1728}
1729at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
1730return wrapper_SparseCPU_out_abs_out(self, out);
1731}
1732at::Tensor & abs_(at::Tensor & self) {
1733return wrapper_SparseCPU__abs_(self);
1734}
1735at::Tensor sgn(const at::Tensor & self) {
1736return wrapper_SparseCPU__sgn(self);
1737}
1738at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) {
1739return wrapper_SparseCPU_out_sgn_out(self, out);
1740}
1741at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) {
1742return wrapper_SparseCPU_out_sgn_out(self, out);
1743}
1744at::Tensor & sgn_(at::Tensor & self) {
1745return wrapper_SparseCPU__sgn_(self);
1746}
1747at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
1748return wrapper_SparseCPU_out_conj_physical_out(self, out);
1749}
1750at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
1751return wrapper_SparseCPU_out_conj_physical_out(self, out);
1752}
1753at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1754return wrapper_SparseCPU_Tensor_add(self, other, alpha);
1755}
1756at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1757return wrapper_SparseCPU_out_add_out(self, other, alpha, out);
1758}
1759at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
1760return wrapper_SparseCPU_out_add_out(self, other, alpha, out);
1761}
1762at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1763return wrapper_SparseCPU_Tensor_add_(self, other, alpha);
1764}
1765at::Tensor asinh(const at::Tensor & self) {
1766return wrapper_SparseCPU__asinh(self);
1767}
1768at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) {
1769return wrapper_SparseCPU_out_asinh_out(self, out);
1770}
1771at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
1772return wrapper_SparseCPU_out_asinh_out(self, out);
1773}
1774at::Tensor & asinh_(at::Tensor & self) {
1775return wrapper_SparseCPU__asinh_(self);
1776}
1777at::Tensor atanh(const at::Tensor & self) {
1778return wrapper_SparseCPU__atanh(self);
1779}
1780at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
1781return wrapper_SparseCPU_out_atanh_out(self, out);
1782}
1783at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
1784return wrapper_SparseCPU_out_atanh_out(self, out);
1785}
1786at::Tensor & atanh_(at::Tensor & self) {
1787return wrapper_SparseCPU__atanh_(self);
1788}
1789at::Tensor asin(const at::Tensor & self) {
1790return wrapper_SparseCPU__asin(self);
1791}
1792at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
1793return wrapper_SparseCPU_out_asin_out(self, out);
1794}
1795at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
1796return wrapper_SparseCPU_out_asin_out(self, out);
1797}
1798at::Tensor & asin_(at::Tensor & self) {
1799return wrapper_SparseCPU__asin_(self);
1800}
1801at::Tensor atan(const at::Tensor & self) {
1802return wrapper_SparseCPU__atan(self);
1803}
1804at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
1805return wrapper_SparseCPU_out_atan_out(self, out);
1806}
1807at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
1808return wrapper_SparseCPU_out_atan_out(self, out);
1809}
1810at::Tensor & atan_(at::Tensor & self) {
1811return wrapper_SparseCPU__atan_(self);
1812}
1813at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) {
1814return wrapper_SparseCPU__bmm(self, mat2);
1815}
1816at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
1817return wrapper_SparseCPU_out_bmm_out(self, mat2, out);
1818}
1819at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
1820return wrapper_SparseCPU_out_bmm_out(self, mat2, out);
1821}
1822at::Tensor _sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
1823return wrapper_SparseCPU___sparse_broadcast_to(self, size);
1824}
1825at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim) {
1826return wrapper_SparseCPU__cat(tensors, dim);
1827}
1828at::Tensor ceil(const at::Tensor & self) {
1829return wrapper_SparseCPU__ceil(self);
1830}
1831at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) {
1832return wrapper_SparseCPU_out_ceil_out(self, out);
1833}
1834at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) {
1835return wrapper_SparseCPU_out_ceil_out(self, out);
1836}
1837at::Tensor & ceil_(at::Tensor & self) {
1838return wrapper_SparseCPU__ceil_(self);
1839}
1840at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
1841return wrapper_SparseCPU__copy_(self, src, non_blocking);
1842}
1843at::Tensor div(const at::Tensor & self, const at::Tensor & other) {
1844return wrapper_SparseCPU_Tensor_div(self, other);
1845}
1846at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
1847return wrapper_SparseCPU_out_div_out(self, other, out);
1848}
1849at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
1850return wrapper_SparseCPU_out_div_out(self, other, out);
1851}
1852at::Tensor & div_(at::Tensor & self, const at::Tensor & other) {
1853return wrapper_SparseCPU_Tensor_div_(self, other);
1854}
1855at::Tensor div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
1856return wrapper_SparseCPU_Tensor_mode_div(self, other, rounding_mode);
1857}
1858at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
1859return wrapper_SparseCPU_out_mode_div_out(self, other, rounding_mode, out);
1860}
1861at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
1862return wrapper_SparseCPU_out_mode_div_out(self, other, rounding_mode, out);
1863}
1864at::Tensor & div_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
1865return wrapper_SparseCPU_Tensor_mode_div_(self, other, rounding_mode);
1866}
1867at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1868return wrapper_SparseCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1869}
1870at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1871return wrapper_SparseCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
1872}
1873at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1874return wrapper_SparseCPU_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1875}
1876at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1877return wrapper_SparseCPU_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
1878}
1879at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1880return wrapper_SparseCPU__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1881}
1882at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1883return wrapper_SparseCPU__empty_like(self, dtype, layout, device, pin_memory, memory_format);
1884}
1885at::Tensor erf(const at::Tensor & self) {
1886return wrapper_SparseCPU__erf(self);
1887}
1888at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) {
1889return wrapper_SparseCPU_out_erf_out(self, out);
1890}
1891at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) {
1892return wrapper_SparseCPU_out_erf_out(self, out);
1893}
1894at::Tensor & erf_(at::Tensor & self) {
1895return wrapper_SparseCPU__erf_(self);
1896}
1897at::Tensor expm1(const at::Tensor & self) {
1898return wrapper_SparseCPU__expm1(self);
1899}
1900at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
1901return wrapper_SparseCPU_out_expm1_out(self, out);
1902}
1903at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
1904return wrapper_SparseCPU_out_expm1_out(self, out);
1905}
1906at::Tensor & expm1_(at::Tensor & self) {
1907return wrapper_SparseCPU__expm1_(self);
1908}
1909at::Tensor floor(const at::Tensor & self) {
1910return wrapper_SparseCPU__floor(self);
1911}
1912at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
1913return wrapper_SparseCPU_out_floor_out(self, out);
1914}
1915at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
1916return wrapper_SparseCPU_out_floor_out(self, out);
1917}
1918at::Tensor & floor_(at::Tensor & self) {
1919return wrapper_SparseCPU__floor_(self);
1920}
1921at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other) {
1922return wrapper_SparseCPU__floor_divide(self, other);
1923}
1924at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
1925return wrapper_SparseCPU_out_floor_divide_out(self, other, out);
1926}
1927at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
1928return wrapper_SparseCPU_out_floor_divide_out(self, other, out);
1929}
1930at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other) {
1931return wrapper_SparseCPU_Tensor_floor_divide_(self, other);
1932}
1933at::Tensor frac(const at::Tensor & self) {
1934return wrapper_SparseCPU__frac(self);
1935}
1936at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
1937return wrapper_SparseCPU_out_frac_out(self, out);
1938}
1939at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
1940return wrapper_SparseCPU_out_frac_out(self, out);
1941}
1942at::Tensor & frac_(at::Tensor & self) {
1943return wrapper_SparseCPU__frac_(self);
1944}
1945at::Tensor isnan(const at::Tensor & self) {
1946return wrapper_SparseCPU__isnan(self);
1947}
1948at::Tensor nan_to_num(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
1949return wrapper_SparseCPU__nan_to_num(self, nan, posinf, neginf);
1950}
1951at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
1952return wrapper_SparseCPU_out_nan_to_num_out(self, nan, posinf, neginf, out);
1953}
1954at::Tensor & nan_to_num_outf(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
1955return wrapper_SparseCPU_out_nan_to_num_out(self, nan, posinf, neginf, out);
1956}
1957at::Tensor & nan_to_num_(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
1958return wrapper_SparseCPU__nan_to_num_(self, nan, posinf, neginf);
1959}
1960at::Tensor log1p(const at::Tensor & self) {
1961return wrapper_SparseCPU__log1p(self);
1962}
1963at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
1964return wrapper_SparseCPU_out_log1p_out(self, out);
1965}
1966at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
1967return wrapper_SparseCPU_out_log1p_out(self, out);
1968}
1969at::Tensor & log1p_(at::Tensor & self) {
1970return wrapper_SparseCPU__log1p_(self);
1971}
1972at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
1973return wrapper_SparseCPU__mm(self, mat2);
1974}
1975at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
1976return wrapper_SparseCPU_out_mm_out(self, mat2, out);
1977}
1978at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
1979return wrapper_SparseCPU_out_mm_out(self, mat2, out);
1980}
1981at::Tensor _sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) {
1982return wrapper_SparseCPU___sparse_sparse_matmul(self, other);
1983}
1984at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
1985return wrapper_SparseCPU_Tensor_mul(self, other);
1986}
1987at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
1988return wrapper_SparseCPU_out_mul_out(self, other, out);
1989}
1990at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
1991return wrapper_SparseCPU_out_mul_out(self, other, out);
1992}
1993at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
1994return wrapper_SparseCPU_Tensor_mul_(self, other);
1995}
1996at::Tensor mv(const at::Tensor & self, const at::Tensor & vec) {
1997return wrapper_SparseCPU__mv(self, vec);
1998}
1999at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
2000return wrapper_SparseCPU__narrow_copy(self, dim, start, length);
2001}
2002at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
2003return wrapper_SparseCPU__narrow_copy(self, dim, start, length);
2004}
2005at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims) {
2006return wrapper_SparseCPU__permute(self, dims);
2007}
2008at::Tensor rad2deg(const at::Tensor & self) {
2009return wrapper_SparseCPU__rad2deg(self);
2010}
2011at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
2012return wrapper_SparseCPU_out_rad2deg_out(self, out);
2013}
2014at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
2015return wrapper_SparseCPU_out_rad2deg_out(self, out);
2016}
2017at::Tensor & rad2deg_(at::Tensor & self) {
2018return wrapper_SparseCPU__rad2deg_(self);
2019}
2020at::Tensor deg2rad(const at::Tensor & self) {
2021return wrapper_SparseCPU__deg2rad(self);
2022}
2023at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
2024return wrapper_SparseCPU_out_deg2rad_out(self, out);
2025}
2026at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
2027return wrapper_SparseCPU_out_deg2rad_out(self, out);
2028}
2029at::Tensor & deg2rad_(at::Tensor & self) {
2030return wrapper_SparseCPU__deg2rad_(self);
2031}
2032at::Tensor neg(const at::Tensor & self) {
2033return wrapper_SparseCPU__neg(self);
2034}
2035at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
2036return wrapper_SparseCPU_out_neg_out(self, out);
2037}
2038at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
2039return wrapper_SparseCPU_out_neg_out(self, out);
2040}
2041at::Tensor & neg_(at::Tensor & self) {
2042return wrapper_SparseCPU__neg_(self);
2043}
2044at::Tensor round(const at::Tensor & self) {
2045return wrapper_SparseCPU__round(self);
2046}
2047at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
2048return wrapper_SparseCPU_out_round_out(self, out);
2049}
2050at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
2051return wrapper_SparseCPU_out_round_out(self, out);
2052}
2053at::Tensor & round_(at::Tensor & self) {
2054return wrapper_SparseCPU__round_(self);
2055}
2056at::Tensor relu(const at::Tensor & self) {
2057return wrapper_SparseCPU__relu(self);
2058}
2059at::Tensor & relu_(at::Tensor & self) {
2060return wrapper_SparseCPU__relu_(self);
2061}
2062at::Tensor sin(const at::Tensor & self) {
2063return wrapper_SparseCPU__sin(self);
2064}
2065at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
2066return wrapper_SparseCPU_out_sin_out(self, out);
2067}
2068at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
2069return wrapper_SparseCPU_out_sin_out(self, out);
2070}
2071at::Tensor & sin_(at::Tensor & self) {
2072return wrapper_SparseCPU__sin_(self);
2073}
2074at::Tensor sinh(const at::Tensor & self) {
2075return wrapper_SparseCPU__sinh(self);
2076}
2077at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
2078return wrapper_SparseCPU_out_sinh_out(self, out);
2079}
2080at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
2081return wrapper_SparseCPU_out_sinh_out(self, out);
2082}
2083at::Tensor & sinh_(at::Tensor & self) {
2084return wrapper_SparseCPU__sinh_(self);
2085}
2086at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
2087return wrapper_SparseCPU_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out);
2088}
2089at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
2090return wrapper_SparseCPU_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out);
2091}
2092at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
2093return wrapper_SparseCPU__sum(self, dtype);
2094}
2095at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
2096return wrapper_SparseCPU_dim_IntList_sum(self, dim, keepdim, dtype);
2097}
2098at::Tensor sqrt(const at::Tensor & self) {
2099return wrapper_SparseCPU__sqrt(self);
2100}
2101at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
2102return wrapper_SparseCPU_out_sqrt_out(self, out);
2103}
2104at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
2105return wrapper_SparseCPU_out_sqrt_out(self, out);
2106}
2107at::Tensor & sqrt_(at::Tensor & self) {
2108return wrapper_SparseCPU__sqrt_(self);
2109}
2110at::Tensor tan(const at::Tensor & self) {
2111return wrapper_SparseCPU__tan(self);
2112}
2113at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
2114return wrapper_SparseCPU_out_tan_out(self, out);
2115}
2116at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
2117return wrapper_SparseCPU_out_tan_out(self, out);
2118}
2119at::Tensor & tan_(at::Tensor & self) {
2120return wrapper_SparseCPU__tan_(self);
2121}
2122at::Tensor tanh(const at::Tensor & self) {
2123return wrapper_SparseCPU__tanh(self);
2124}
2125at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
2126return wrapper_SparseCPU_out_tanh_out(self, out);
2127}
2128at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
2129return wrapper_SparseCPU_out_tanh_out(self, out);
2130}
2131at::Tensor & tanh_(at::Tensor & self) {
2132return wrapper_SparseCPU__tanh_(self);
2133}
2134at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
2135return wrapper_SparseCPU__threshold_backward(grad_output, self, threshold);
2136}
2137at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
2138return wrapper_SparseCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
2139}
2140at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
2141return wrapper_SparseCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
2142}
2143at::Tensor trunc(const at::Tensor & self) {
2144return wrapper_SparseCPU__trunc(self);
2145}
2146at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
2147return wrapper_SparseCPU_out_trunc_out(self, out);
2148}
2149at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
2150return wrapper_SparseCPU_out_trunc_out(self, out);
2151}
2152at::Tensor & trunc_(at::Tensor & self) {
2153return wrapper_SparseCPU__trunc_(self);
2154}
2155at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) {
2156return wrapper_SparseCPU__unsqueeze(self, dim);
2157}
2158at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) {
2159return wrapper_SparseCPU_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
2160}
2161at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) {
2162return wrapper_SparseCPU_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
2163}
2164at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
2165return wrapper_SparseCPU_out_zeros_out(size, out);
2166}
2167at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
2168return wrapper_SparseCPU_out_zeros_out(size, out);
2169}
2170at::Tensor native_norm(const at::Tensor & self, const at::Scalar & p) {
2171return wrapper_SparseCPU__native_norm(self, p);
2172}
2173at::Tensor native_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
2174return wrapper_SparseCPU_ScalarOpt_dim_dtype_native_norm(self, p, dim, keepdim, dtype);
2175}
2176at::Tensor _sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
2177return wrapper_SparseCPU___sparse_sum_backward(grad, self, dim);
2178}
2179at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
2180return wrapper_SparseCPU___sparse_softmax(self, dim, half_to_float);
2181}
2182at::Tensor _sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
2183return wrapper_SparseCPU___sparse_softmax_backward_data(grad_output, output, dim, self);
2184}
2185at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
2186return wrapper_SparseCPU___sparse_log_softmax(self, dim, half_to_float);
2187}
2188at::Tensor _sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
2189return wrapper_SparseCPU___sparse_log_softmax_backward_data(grad_output, output, dim, self);
2190}
2191at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
2192return wrapper_SparseCPU_ScalarOpt_dim_dtype_norm(self, p, dim, keepdim, dtype);
2193}
2194at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
2195return wrapper_SparseCPU_ScalarOpt_dim_norm(self, p, dim, keepdim);
2196}
2197at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
2198return wrapper_SparseCPU__clone(self, memory_format);
2199}
2200const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
2201return wrapper_SparseCPU__resize_as_sparse_(self, the_template);
2202}
2203at::Tensor & zero_(at::Tensor & self) {
2204return wrapper_SparseCPU__zero_(self);
2205}
2206at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
2207return wrapper_SparseCPU_Tensor_sub(self, other, alpha);
2208}
2209at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
2210return wrapper_SparseCPU_out_sub_out(self, other, alpha, out);
2211}
2212at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
2213return wrapper_SparseCPU_out_sub_out(self, other, alpha, out);
2214}
2215at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
2216return wrapper_SparseCPU_Tensor_sub_(self, other, alpha);
2217}
2218at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
2219return wrapper_SparseCPU__addmm(self, mat1, mat2, beta, alpha);
2220}
2221at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
2222return wrapper_SparseCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
2223}
2224at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
2225return wrapper_SparseCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
2226}
2227at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
2228return wrapper_SparseCPU__addmm_(self, mat1, mat2, beta, alpha);
2229}
2230at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
2231return wrapper_SparseCPU___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2232}
2233at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2234return wrapper_SparseCPU___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
2235}
2236at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
2237return wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2238}
2239at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2240return wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory);
2241}
2242at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
2243return wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2244}
2245at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2246return wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
2247}
2248const at::Tensor & sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
2249return wrapper_SparseCPU__sparse_resize_(self, size, sparse_dim, dense_dim);
2250}
2251const at::Tensor & sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
2252return wrapper_SparseCPU__sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
2253}
2254at::Tensor sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
2255return wrapper_SparseCPU__sparse_mask(self, mask);
2256}
2257at::Tensor _to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
2258return wrapper_SparseCPU___to_dense(self, dtype);
2259}
2260int64_t sparse_dim(const at::Tensor & self) {
2261return wrapper_SparseCPU__sparse_dim(self);
2262}
2263int64_t _dimI(const at::Tensor & self) {
2264return wrapper_SparseCPU___dimI(self);
2265}
2266int64_t dense_dim(const at::Tensor & self) {
2267return wrapper_SparseCPU__dense_dim(self);
2268}
2269int64_t _dimV(const at::Tensor & self) {
2270return wrapper_SparseCPU___dimV(self);
2271}
2272int64_t _nnz(const at::Tensor & self) {
2273return wrapper_SparseCPU___nnz(self);
2274}
2275at::Tensor _coalesce(const at::Tensor & self) {
2276return wrapper_SparseCPU___coalesce(self);
2277}
2278bool is_coalesced(const at::Tensor & self) {
2279return wrapper_SparseCPU__is_coalesced(self);
2280}
2281at::Tensor _indices(const at::Tensor & self) {
2282return wrapper_SparseCPU___indices(self);
2283}
2284at::Tensor _values(const at::Tensor & self) {
2285return wrapper_SparseCPU___values(self);
2286}
2287at::Tensor & _coalesced_(at::Tensor & self, bool coalesced) {
2288return wrapper_SparseCPU___coalesced_(self, coalesced);
2289}
2290at::Tensor indices(const at::Tensor & self) {
2291return wrapper_SparseCPU__indices(self);
2292}
2293at::Tensor values(const at::Tensor & self) {
2294return wrapper_SparseCPU__values(self);
2295}
2296at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) {
2297return wrapper_SparseCPU__hspmm(mat1, mat2);
2298}
2299at::Tensor & hspmm_out(at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) {
2300return wrapper_SparseCPU_out_hspmm_out(mat1, mat2, out);
2301}
2302at::Tensor & hspmm_outf(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
2303return wrapper_SparseCPU_out_hspmm_out(mat1, mat2, out);
2304}
2305at::Tensor & copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
2306return wrapper_SparseCPU__copy_sparse_to_sparse_(self, src, non_blocking);
2307}
2308at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) {
2309return wrapper_SparseCPU_sparse_dim_to_sparse(self, sparse_dim);
2310}
2311at::Tensor to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
2312return wrapper_SparseCPU__to_sparse(self, layout, blocksize, dense_dim);
2313}
2314at::Tensor to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
2315return wrapper_SparseCPU__to_sparse_csr(self, dense_dim);
2316}
2317at::Tensor to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
2318return wrapper_SparseCPU__to_sparse_csc(self, dense_dim);
2319}
2320at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
2321return wrapper_SparseCPU__to_sparse_bsr(self, blocksize, dense_dim);
2322}
2323at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
2324return wrapper_SparseCPU__to_sparse_bsc(self, blocksize, dense_dim);
2325}
2326at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
2327return wrapper_SparseCPU__index_select(self, dim, index);
2328}
2329at::Tensor erfinv(const at::Tensor & self) {
2330return wrapper_SparseCPU__erfinv(self);
2331}
2332at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) {
2333return wrapper_SparseCPU_out_erfinv_out(self, out);
2334}
2335at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) {
2336return wrapper_SparseCPU_out_erfinv_out(self, out);
2337}
2338at::Tensor & erfinv_(at::Tensor & self) {
2339return wrapper_SparseCPU__erfinv_(self);
2340}
2341at::Tensor sign(const at::Tensor & self) {
2342return wrapper_SparseCPU__sign(self);
2343}
2344at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
2345return wrapper_SparseCPU_out_sign_out(self, out);
2346}
2347at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
2348return wrapper_SparseCPU_out_sign_out(self, out);
2349}
2350at::Tensor & sign_(at::Tensor & self) {
2351return wrapper_SparseCPU__sign_(self);
2352}
2353at::Tensor signbit(const at::Tensor & self) {
2354return wrapper_SparseCPU__signbit(self);
2355}
2356at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) {
2357return wrapper_SparseCPU_out_signbit_out(self, out);
2358}
2359at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) {
2360return wrapper_SparseCPU_out_signbit_out(self, out);
2361}
2362at::Tensor any(const at::Tensor & self) {
2363return wrapper_SparseCPU__any(self);
2364}
2365at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) {
2366return wrapper_SparseCPU_Tensor_Scalar_pow(self, exponent);
2367}
2368at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
2369return wrapper_SparseCPU_Tensor_Scalar_out_pow_out(self, exponent, out);
2370}
2371at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
2372return wrapper_SparseCPU_Tensor_Scalar_out_pow_out(self, exponent, out);
2373}
2374at::Tensor isinf(const at::Tensor & self) {
2375return wrapper_SparseCPU__isinf(self);
2376}
2377at::Tensor isposinf(const at::Tensor & self) {
2378return wrapper_SparseCPU__isposinf(self);
2379}
2380at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
2381return wrapper_SparseCPU_out_isposinf_out(self, out);
2382}
2383at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
2384return wrapper_SparseCPU_out_isposinf_out(self, out);
2385}
2386at::Tensor isneginf(const at::Tensor & self) {
2387return wrapper_SparseCPU__isneginf(self);
2388}
2389at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
2390return wrapper_SparseCPU_out_isneginf_out(self, out);
2391}
2392at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
2393return wrapper_SparseCPU_out_isneginf_out(self, out);
2394}
2395} // namespace sparsecpu
2396} // namespace at
2397