1 | // required for old g++ to compile PRId64 macros, see |
2 | // https://github.com/pytorch/pytorch/issues/3571 |
3 | // for context |
4 | #ifndef __STDC_FORMAT_MACROS |
5 | #define __STDC_FORMAT_MACROS |
6 | #endif |
7 | |
8 | // an external backend might generate file within its code tree |
9 | // and check all the source files within the tree with clang-format. |
10 | // so, disable it since the backend might have a different config. |
11 | // clang-format off |
12 | |
13 | // NOTE: This condition is true for all PyTorch internal libraries, it |
14 | // just excludes external projects such as torch_xla which |
15 | // re-use some of the PyTorch codegen machinery. |
16 | #if defined(CAFFE2_BUILD_MAIN_LIB) || \ |
17 | defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ |
18 | defined(TORCH_HIP_BUILD_MAIN_LIB) || \ |
19 | defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ |
20 | defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) |
21 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
22 | #endif |
23 | |
24 | // @generated by torchgen/gen.py from RegisterDispatchKey.cpp |
25 | |
26 | #include <c10/core/TensorImpl.h> |
27 | #include <c10/core/Allocator.h> |
28 | #include <ATen/DeviceGuard.h> |
29 | #include <ATen/NamedTensorUtils.h> |
30 | #include <ATen/Utils.h> |
31 | #include <ATen/WrapDimUtils.h> |
32 | #include <ATen/Dispatch.h> |
33 | #include <c10/util/ExclusivelyOwned.h> |
34 | #include <c10/util/Half.h> |
35 | #include <c10/core/UndefinedTensorImpl.h> |
36 | #include <c10/util/Optional.h> |
37 | #include <ATen/Tensor.h> |
38 | #include <ATen/native/Resize.h> |
39 | |
40 | #include <cstddef> |
41 | #include <functional> |
42 | #include <memory> |
43 | #include <utility> |
44 | |
45 | #include <ATen/Config.h> |
46 | #include <ATen/core/op_registration/adaption.h> |
47 | #include <torch/library.h> |
48 | |
49 | |
50 | #include <ATen/ops/as_strided_native.h> |
51 | #include <ATen/ops/empty.h> |
52 | #include <ATen/ops/empty_strided.h> |
53 | #include <ATen/ops/_copy_from_and_resize.h> |
54 | #include <ATen/ops/_copy_from.h> |
55 | #include <ATen/ops/_conj_physical_native.h> |
56 | #include <ATen/ops/_nnz_native.h> |
57 | #include <ATen/ops/_sparse_csr_prod_native.h> |
58 | #include <ATen/ops/_sparse_csr_sum_native.h> |
59 | #include <ATen/ops/_sparse_mm_reduce_impl_backward_native.h> |
60 | #include <ATen/ops/_sparse_mm_reduce_impl_native.h> |
61 | #include <ATen/ops/_to_dense_native.h> |
62 | #include <ATen/ops/abs_native.h> |
63 | #include <ATen/ops/add_native.h> |
64 | #include <ATen/ops/addmm_native.h> |
65 | #include <ATen/ops/addmv_native.h> |
66 | #include <ATen/ops/angle_native.h> |
67 | #include <ATen/ops/asin_native.h> |
68 | #include <ATen/ops/asinh_native.h> |
69 | #include <ATen/ops/atan_native.h> |
70 | #include <ATen/ops/atanh_native.h> |
71 | #include <ATen/ops/ccol_indices_native.h> |
72 | #include <ATen/ops/ceil_native.h> |
73 | #include <ATen/ops/clone_native.h> |
74 | #include <ATen/ops/col_indices_native.h> |
75 | #include <ATen/ops/conj_physical_native.h> |
76 | #include <ATen/ops/copy_native.h> |
77 | #include <ATen/ops/crow_indices_native.h> |
78 | #include <ATen/ops/deg2rad_native.h> |
79 | #include <ATen/ops/dense_dim_native.h> |
80 | #include <ATen/ops/empty_like_native.h> |
81 | #include <ATen/ops/empty_native.h> |
82 | #include <ATen/ops/erf_native.h> |
83 | #include <ATen/ops/erfinv_native.h> |
84 | #include <ATen/ops/expm1_native.h> |
85 | #include <ATen/ops/fill_native.h> |
86 | #include <ATen/ops/floor_native.h> |
87 | #include <ATen/ops/frac_native.h> |
88 | #include <ATen/ops/isinf_native.h> |
89 | #include <ATen/ops/isnan_native.h> |
90 | #include <ATen/ops/isneginf_native.h> |
91 | #include <ATen/ops/isposinf_native.h> |
92 | #include <ATen/ops/log1p_native.h> |
93 | #include <ATen/ops/mm_native.h> |
94 | #include <ATen/ops/mul_native.h> |
95 | #include <ATen/ops/neg_native.h> |
96 | #include <ATen/ops/normal_native.h> |
97 | #include <ATen/ops/rad2deg_native.h> |
98 | #include <ATen/ops/relu_native.h> |
99 | #include <ATen/ops/resize_as_sparse_native.h> |
100 | #include <ATen/ops/resize_native.h> |
101 | #include <ATen/ops/round_native.h> |
102 | #include <ATen/ops/row_indices_native.h> |
103 | #include <ATen/ops/select_copy_native.h> |
104 | #include <ATen/ops/select_native.h> |
105 | #include <ATen/ops/sgn_native.h> |
106 | #include <ATen/ops/sign_native.h> |
107 | #include <ATen/ops/signbit_native.h> |
108 | #include <ATen/ops/sin_native.h> |
109 | #include <ATen/ops/sinh_native.h> |
110 | #include <ATen/ops/sparse_dim_native.h> |
111 | #include <ATen/ops/sparse_mask_native.h> |
112 | #include <ATen/ops/sparse_sampled_addmm_native.h> |
113 | #include <ATen/ops/sqrt_native.h> |
114 | #include <ATen/ops/sum_native.h> |
115 | #include <ATen/ops/tan_native.h> |
116 | #include <ATen/ops/tanh_native.h> |
117 | #include <ATen/ops/threshold_backward_native.h> |
118 | #include <ATen/ops/to_sparse_bsc_native.h> |
119 | #include <ATen/ops/to_sparse_bsr_native.h> |
120 | #include <ATen/ops/to_sparse_csc_native.h> |
121 | #include <ATen/ops/to_sparse_csr_native.h> |
122 | #include <ATen/ops/to_sparse_native.h> |
123 | #include <ATen/ops/triangular_solve_native.h> |
124 | #include <ATen/ops/trunc_native.h> |
125 | #include <ATen/ops/values_native.h> |
126 | #include <ATen/ops/zero_native.h> |
127 | |
128 | // See template file RegisterDispatchDefinitions.ini |
129 | namespace at { |
130 | // NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid |
131 | // ambiguity with conflicting identifiers that may have been defined in |
132 | // at namespace already. |
133 | namespace { |
134 | void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) { |
135 | TORCH_CHECK(options.dtype() == out.dtype(), |
136 | "Expected out tensor to have dtype " , options.dtype(), ", but got " , out.dtype(), " instead" ); |
137 | TORCH_CHECK(options.device() == out.device(), |
138 | "Expected out tensor to have device " , options.device(), ", but got " , out.device(), " instead" ); |
139 | const bool resized = at::native::resize_output(out, sizes); |
140 | // Only restride if a resize occurred; otherwise we ignore the (advisory) |
141 | // strides from the meta function and directly use the output tensor's |
142 | // preexisting strides |
143 | if (resized) { |
144 | if (!strides.empty()) { |
145 | TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value()); |
146 | // TODO: avoid the redispatch here |
147 | out.as_strided_(sizes, strides); |
148 | } else if (options.memory_format_opt().has_value()) { |
149 | out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt()); |
150 | } |
151 | } |
152 | } |
153 | void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) { |
154 | // These checks are needed on those operators that: |
155 | // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm') |
156 | // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod') |
157 | // For other operators (e.g. 'add'), 'TensorIterator' already checks |
158 | // these things separately. |
159 | TORCH_CHECK(options.dtype() == self.dtype(), |
160 | "Bad in-place call: " , |
161 | "input tensor dtype " , self.dtype(), " and output tensor dtype " , options.dtype(), " should match" ); |
162 | TORCH_CHECK(options.device() == self.device(), |
163 | "Bad in-place call: " , |
164 | "input tensor device " , self.device(), " and output tensor device " , options.device(), " should match" ); |
165 | TORCH_CHECK(sizes == self.sizes(), |
166 | "Bad in-place call: " , |
167 | "input tensor size " , self.sizes(), " and output tensor size " , sizes, " should match" ); |
168 | } |
// Dispatch-key wrappers for abs / angle / sgn / _conj_physical.
// Naming follows torchgen: wrapper_<DispatchKey>_<overload>_<op>; each
// wrapper simply forwards to the corresponding at::native sparse-CSR
// implementation. Device checks and device guards are intentionally
// omitted by the codegen for these ops. NOTE(review): this file is
// @generated — behavioral changes belong in the torchgen templates.
namespace {
at::Tensor wrapper_SparseCsrCPU__abs(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_abs_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__abs_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__angle(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::angle_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_angle_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::angle_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__sgn(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sgn_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sgn_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sgn_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sgn_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sgn_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU___conj_physical(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__conj_physical_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical_sparse_csr_(self);
}
} // anonymous namespace
// Dispatch-key wrappers for add (functional / out / in-place) and
// addmv.out; each forwards to the at::native sparse-CSR/compressed kernel.
// The add.out wrapper targets the CPU-specific kernel (add_out_sparse_csr_cpu).
namespace {
at::Tensor wrapper_SparseCsrCPU_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::add_sparse_csr(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::add_out_sparse_csr_cpu(self, other, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::add_sparse_csr_(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_addmv_out(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::addmv_out_sparse_compressed(self, mat, vec, beta, alpha, out);
}
} // anonymous namespace
// Dispatch-key wrappers for the unary ops asinh / atanh / asin / atan /
// ceil, each in functional, out, and in-place variants. Every wrapper
// forwards directly to the matching at::native *_sparse_csr kernel.
namespace {
at::Tensor wrapper_SparseCsrCPU__asinh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::asinh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_asinh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::asinh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__asinh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::asinh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__atanh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atanh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_atanh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::atanh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__atanh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atanh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__asin(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::asin_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_asin_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::asin_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__asin_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::asin_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__atan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atan_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_atan_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::atan_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__atan_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atan_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__ceil(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::ceil_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_ceil_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ceil_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__ceil_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::ceil_sparse_csr_(self);
}
} // anonymous namespace
// Dispatch-key wrappers for tensor lifecycle ops: copy_, empty (memory_format
// overload), resize_, and empty_like. SymInt sizes are converted eagerly via
// C10_AS_INTARRAYREF_SLOW before reaching the non-symbolic native kernels.
namespace {
at::Tensor & wrapper_SparseCsrCPU__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::copy_sparse_compressed_(self, src, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_sparse_compressed(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_SparseCsrCPU__resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::resize_sparse_csr_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_like_sparse_csr(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
// Dispatch-key wrappers for erf / expm1 (functional, out, in-place) and
// the Scalar overload of fill_; each forwards to the at::native CSR kernel.
namespace {
at::Tensor wrapper_SparseCsrCPU__erf(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::erf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_erf_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::erf_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__erf_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::erf_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__expm1(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::expm1_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_expm1_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::expm1_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__expm1_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::expm1_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_Scalar_fill_(at::Tensor & self, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::fill_sparse_csr_(self, value);
}
} // anonymous namespace
// Dispatch-key wrappers for floor / frac (functional, out, in-place),
// isnan (functional only), and log1p (functional, out, in-place).
namespace {
at::Tensor wrapper_SparseCsrCPU__floor(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_floor_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__floor_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__frac(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_frac_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__frac_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__isnan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::isnan_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__log1p(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_log1p_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__log1p_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr_(self);
}
} // anonymous namespace
// Dispatch-key wrappers for mm (functional, out) and mul (Tensor and Scalar
// overloads, each with functional / out / in-place variants as registered).
namespace {
at::Tensor wrapper_SparseCsrCPU__mm(const at::Tensor & self, const at::Tensor & mat2) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_mm(self, mat2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_mm_out(self, mat2, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_Tensor_mul(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_out_sparse_csr(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_Tensor_mul_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_sparse_csr_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_Scalar_mul(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul_scalar_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_Scalar_mul_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::mul__scalar_sparse_csr(self, other);
}
} // anonymous namespace
// Dispatch-key wrappers for rad2deg / deg2rad / neg / round, each with
// functional, out, and in-place variants forwarding to at::native kernels.
namespace {
at::Tensor wrapper_SparseCsrCPU__rad2deg(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__rad2deg_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__deg2rad(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__deg2rad_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__neg(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_neg_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__neg_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__round(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_round_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__round_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr_(self);
}
} // anonymous namespace
// Dispatch-key wrappers for relu (functional, in-place), select.int
// (note: the SymInt index is materialized via expect_int()), sin / sinh
// (functional, out, in-place), and sum (dtype overload).
namespace {
at::Tensor wrapper_SparseCsrCPU__relu(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__relu_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_int_select(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  // No device check
  // DeviceGuard omitted
  return at::native::select_sparse_csr(self, dim, index.expect_int());
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__sin(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sin_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sin_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__sinh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sinh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sinh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::sum_csr(self, dtype);
}
} // anonymous namespace
// Dispatch-key wrappers for sqrt / tan / tanh, each with functional, out,
// and in-place variants forwarding to at::native *_sparse_csr kernels.
namespace {
at::Tensor wrapper_SparseCsrCPU__sqrt(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sqrt_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sqrt_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__tan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_tan_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__tan_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__tanh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_tanh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__tanh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr_(self);
}
} // anonymous namespace
// @generated dispatch glue: threshold_backward and trunc variants for the
// SparseCsrCPU key; thin forwards to at::native sparse-compressed kernels.
namespace {
at::Tensor wrapper_SparseCsrCPU__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
// No device check
// DeviceGuard omitted
return at::native::threshold_backward_sparse_compressed(grad_output, self, threshold);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
// No device check
// DeviceGuard omitted
return at::native::threshold_backward_sparse_compressed_out(grad_output, self, threshold, grad_input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__trunc(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::trunc_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_trunc_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::trunc_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__trunc_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::trunc_sparse_csr_(self);
}
} // anonymous namespace
// @generated dispatch glue: CSR reductions (sum/prod over dims with optional
// dtype), clone, resize_as_sparse_, and zero_ for the SparseCsrCPU key.
namespace {
at::Tensor wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_csr_sum_cpu(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_csr_prod_cpu(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
// No device check
// DeviceGuard omitted
return at::native::clone_sparse_compressed(self, memory_format);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_SparseCsrCPU__resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
// No device check
// DeviceGuard omitted
return at::native::resize_as_sparse_compressed_(self, the_template);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__zero_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::zero_sparse_csr_(self);
}
} // anonymous namespace
// @generated dispatch glue: CSR matrix-multiply family for the SparseCsrCPU
// key — sampled addmm, the _sparse_mm_reduce_impl pair (forward/backward),
// and dense addmm with sparse-compressed operands.
namespace {
at::Tensor wrapper_SparseCsrCPU__sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::sparse_sampled_addmm_sparse_csr_cpu(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::sparse_sampled_addmm_out_sparse_csr_cpu(self, mat1, mat2, beta, alpha, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_SparseCsrCPU___sparse_mm_reduce_impl(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_mm_reduce_impl_sparse_csr_cpu(self, other, reduce);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_mm_reduce_impl_backward_sparse_csr_cpu(self, grad_out, weight, reduce, arg_out, output_mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::addmm_sparse_compressed_dense(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::addmm_out_sparse_compressed_cpu(self, mat1, mat2, beta, alpha, out);
}
} // anonymous namespace
// @generated dispatch glue: sparse_mask, _to_dense, and the CSR metadata
// accessors (sparse_dim/dense_dim/_nnz/values and the four index getters)
// for the SparseCsrCPU key.
namespace {
at::Tensor wrapper_SparseCsrCPU__sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
// No device check
// DeviceGuard omitted
return at::native::sparse_mask_sparse_csr(self, mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU___to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_to_dense(self, dtype);
}
} // anonymous namespace
namespace {
int64_t wrapper_SparseCsrCPU__sparse_dim(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::sparse_dim_sparse_csr(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_SparseCsrCPU__dense_dim(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::dense_dim_sparse_csr(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_SparseCsrCPU___nnz(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::_nnz_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__values(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::values_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__crow_indices(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::crow_indices_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__col_indices(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::col_indices_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__ccol_indices(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::ccol_indices_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__row_indices(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::row_indices_sparse_csr(self);
}
} // anonymous namespace
// @generated dispatch glue: layout-conversion ops for the SparseCsrCPU key —
// to_sparse (both overloads) and the csr/csc/bsr/bsc targeted conversions,
// all forwarding to at::native sparse_compressed_to_* kernels.
namespace {
at::Tensor wrapper_SparseCsrCPU_sparse_dim_to_sparse(const at::Tensor & self, int64_t sparse_dim) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_to_sparse(self, sparse_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_to_sparse(self, layout, blocksize, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_to_sparse_csr(self, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_to_sparse_csc(self, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_to_sparse_bsr(self, blocksize, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_to_sparse_bsc(self, blocksize, dense_dim);
}
} // anonymous namespace
// @generated dispatch glue: triangular_solve.X plus the erfinv and sign
// unary-op families for the SparseCsrCPU key.
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_SparseCsrCPU_X_triangular_solve_out(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
// No device check
// DeviceGuard omitted
return at::native::triangular_solve_out_sparse_csr_cpu(self, A, upper, transpose, unitriangular, X, M);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__erfinv(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::erfinv_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_erfinv_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::erfinv_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__erfinv_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::erfinv_sparse_csr_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__sign(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::sign_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_sign_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::sign_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__sign_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::sign_sparse_csr_(self);
}
} // anonymous namespace
// @generated dispatch glue: signbit, in-place normal_, the isinf/isposinf/
// isneginf predicate family, and select_copy.int (which unwraps the SymInt
// index to a concrete int64 via expect_int) for the SparseCsrCPU key.
namespace {
at::Tensor wrapper_SparseCsrCPU__signbit(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::signbit_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_signbit_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::signbit_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::normal_sparse_csr_(self, mean, std, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__isinf(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::isinf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__isposinf(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::isposinf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_isposinf_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::isposinf_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU__isneginf(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::isneginf_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_isneginf_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::isneginf_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_SparseCsrCPU_int_select_copy(const at::Tensor & self, int64_t dim, c10::SymInt index) {
// No device check
// DeviceGuard omitted
return at::native::select_copy_sparse_csr(self, dim, index.expect_int());
}
} // anonymous namespace
// @generated registration block: binds every wrapper above to its ATen
// operator schema ("name" or "name.overload") under the SparseCsrCPU
// dispatch key in the "aten" library. Runs at static-initialization time
// via the TORCH_LIBRARY_IMPL machinery; order of m.impl calls is not
// significant.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
m.impl("abs" ,
TORCH_FN(wrapper_SparseCsrCPU__abs));
m.impl("abs.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_abs_out));
m.impl("abs_" ,
TORCH_FN(wrapper_SparseCsrCPU__abs_));
m.impl("angle" ,
TORCH_FN(wrapper_SparseCsrCPU__angle));
m.impl("angle.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_angle_out));
m.impl("sgn" ,
TORCH_FN(wrapper_SparseCsrCPU__sgn));
m.impl("sgn.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_sgn_out));
m.impl("sgn_" ,
TORCH_FN(wrapper_SparseCsrCPU__sgn_));
m.impl("_conj_physical" ,
TORCH_FN(wrapper_SparseCsrCPU___conj_physical));
m.impl("conj_physical.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_conj_physical_out));
m.impl("conj_physical_" ,
TORCH_FN(wrapper_SparseCsrCPU__conj_physical_));
m.impl("add.Tensor" ,
TORCH_FN(wrapper_SparseCsrCPU_Tensor_add));
m.impl("add.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_add_out));
m.impl("add_.Tensor" ,
TORCH_FN(wrapper_SparseCsrCPU_Tensor_add_));
m.impl("addmv.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_addmv_out));
m.impl("asinh" ,
TORCH_FN(wrapper_SparseCsrCPU__asinh));
m.impl("asinh.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_asinh_out));
m.impl("asinh_" ,
TORCH_FN(wrapper_SparseCsrCPU__asinh_));
m.impl("atanh" ,
TORCH_FN(wrapper_SparseCsrCPU__atanh));
m.impl("atanh.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_atanh_out));
m.impl("atanh_" ,
TORCH_FN(wrapper_SparseCsrCPU__atanh_));
m.impl("asin" ,
TORCH_FN(wrapper_SparseCsrCPU__asin));
m.impl("asin.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_asin_out));
m.impl("asin_" ,
TORCH_FN(wrapper_SparseCsrCPU__asin_));
m.impl("atan" ,
TORCH_FN(wrapper_SparseCsrCPU__atan));
m.impl("atan.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_atan_out));
m.impl("atan_" ,
TORCH_FN(wrapper_SparseCsrCPU__atan_));
m.impl("ceil" ,
TORCH_FN(wrapper_SparseCsrCPU__ceil));
m.impl("ceil.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_ceil_out));
m.impl("ceil_" ,
TORCH_FN(wrapper_SparseCsrCPU__ceil_));
m.impl("copy_" ,
TORCH_FN(wrapper_SparseCsrCPU__copy_));
m.impl("empty.memory_format" ,
TORCH_FN(wrapper_SparseCsrCPU_memory_format_empty));
m.impl("resize_" ,
TORCH_FN(wrapper_SparseCsrCPU__resize_));
m.impl("empty_like" ,
TORCH_FN(wrapper_SparseCsrCPU__empty_like));
m.impl("erf" ,
TORCH_FN(wrapper_SparseCsrCPU__erf));
m.impl("erf.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_erf_out));
m.impl("erf_" ,
TORCH_FN(wrapper_SparseCsrCPU__erf_));
m.impl("expm1" ,
TORCH_FN(wrapper_SparseCsrCPU__expm1));
m.impl("expm1.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_expm1_out));
m.impl("expm1_" ,
TORCH_FN(wrapper_SparseCsrCPU__expm1_));
m.impl("fill_.Scalar" ,
TORCH_FN(wrapper_SparseCsrCPU_Scalar_fill_));
m.impl("floor" ,
TORCH_FN(wrapper_SparseCsrCPU__floor));
m.impl("floor.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_floor_out));
m.impl("floor_" ,
TORCH_FN(wrapper_SparseCsrCPU__floor_));
m.impl("frac" ,
TORCH_FN(wrapper_SparseCsrCPU__frac));
m.impl("frac.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_frac_out));
m.impl("frac_" ,
TORCH_FN(wrapper_SparseCsrCPU__frac_));
m.impl("isnan" ,
TORCH_FN(wrapper_SparseCsrCPU__isnan));
m.impl("log1p" ,
TORCH_FN(wrapper_SparseCsrCPU__log1p));
m.impl("log1p.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_log1p_out));
m.impl("log1p_" ,
TORCH_FN(wrapper_SparseCsrCPU__log1p_));
m.impl("mm" ,
TORCH_FN(wrapper_SparseCsrCPU__mm));
m.impl("mm.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_mm_out));
m.impl("mul.Tensor" ,
TORCH_FN(wrapper_SparseCsrCPU_Tensor_mul));
m.impl("mul.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_mul_out));
m.impl("mul_.Tensor" ,
TORCH_FN(wrapper_SparseCsrCPU_Tensor_mul_));
m.impl("mul.Scalar" ,
TORCH_FN(wrapper_SparseCsrCPU_Scalar_mul));
m.impl("mul_.Scalar" ,
TORCH_FN(wrapper_SparseCsrCPU_Scalar_mul_));
m.impl("rad2deg" ,
TORCH_FN(wrapper_SparseCsrCPU__rad2deg));
m.impl("rad2deg.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_rad2deg_out));
m.impl("rad2deg_" ,
TORCH_FN(wrapper_SparseCsrCPU__rad2deg_));
m.impl("deg2rad" ,
TORCH_FN(wrapper_SparseCsrCPU__deg2rad));
m.impl("deg2rad.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_deg2rad_out));
m.impl("deg2rad_" ,
TORCH_FN(wrapper_SparseCsrCPU__deg2rad_));
m.impl("neg" ,
TORCH_FN(wrapper_SparseCsrCPU__neg));
m.impl("neg.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_neg_out));
m.impl("neg_" ,
TORCH_FN(wrapper_SparseCsrCPU__neg_));
m.impl("round" ,
TORCH_FN(wrapper_SparseCsrCPU__round));
m.impl("round.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_round_out));
m.impl("round_" ,
TORCH_FN(wrapper_SparseCsrCPU__round_));
m.impl("relu" ,
TORCH_FN(wrapper_SparseCsrCPU__relu));
m.impl("relu_" ,
TORCH_FN(wrapper_SparseCsrCPU__relu_));
m.impl("select.int" ,
TORCH_FN(wrapper_SparseCsrCPU_int_select));
m.impl("sin" ,
TORCH_FN(wrapper_SparseCsrCPU__sin));
m.impl("sin.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_sin_out));
m.impl("sin_" ,
TORCH_FN(wrapper_SparseCsrCPU__sin_));
m.impl("sinh" ,
TORCH_FN(wrapper_SparseCsrCPU__sinh));
m.impl("sinh.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_sinh_out));
m.impl("sinh_" ,
TORCH_FN(wrapper_SparseCsrCPU__sinh_));
m.impl("sum" ,
TORCH_FN(wrapper_SparseCsrCPU__sum));
m.impl("sqrt" ,
TORCH_FN(wrapper_SparseCsrCPU__sqrt));
m.impl("sqrt.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_sqrt_out));
m.impl("sqrt_" ,
TORCH_FN(wrapper_SparseCsrCPU__sqrt_));
m.impl("tan" ,
TORCH_FN(wrapper_SparseCsrCPU__tan));
m.impl("tan.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_tan_out));
m.impl("tan_" ,
TORCH_FN(wrapper_SparseCsrCPU__tan_));
m.impl("tanh" ,
TORCH_FN(wrapper_SparseCsrCPU__tanh));
m.impl("tanh.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_tanh_out));
m.impl("tanh_" ,
TORCH_FN(wrapper_SparseCsrCPU__tanh_));
m.impl("threshold_backward" ,
TORCH_FN(wrapper_SparseCsrCPU__threshold_backward));
m.impl("threshold_backward.grad_input" ,
TORCH_FN(wrapper_SparseCsrCPU_grad_input_threshold_backward_out));
m.impl("trunc" ,
TORCH_FN(wrapper_SparseCsrCPU__trunc));
m.impl("trunc.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_trunc_out));
m.impl("trunc_" ,
TORCH_FN(wrapper_SparseCsrCPU__trunc_));
m.impl("_sparse_csr_sum.dim_dtype" ,
TORCH_FN(wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum));
m.impl("_sparse_csr_prod.dim_dtype" ,
TORCH_FN(wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod));
m.impl("clone" ,
TORCH_FN(wrapper_SparseCsrCPU__clone));
m.impl("resize_as_sparse_" ,
TORCH_FN(wrapper_SparseCsrCPU__resize_as_sparse_));
m.impl("zero_" ,
TORCH_FN(wrapper_SparseCsrCPU__zero_));
m.impl("sparse_sampled_addmm" ,
TORCH_FN(wrapper_SparseCsrCPU__sparse_sampled_addmm));
m.impl("sparse_sampled_addmm.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out));
m.impl("_sparse_mm_reduce_impl" ,
TORCH_FN(wrapper_SparseCsrCPU___sparse_mm_reduce_impl));
m.impl("_sparse_mm_reduce_impl_backward" ,
TORCH_FN(wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward));
m.impl("addmm" ,
TORCH_FN(wrapper_SparseCsrCPU__addmm));
m.impl("addmm.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_addmm_out));
m.impl("sparse_mask" ,
TORCH_FN(wrapper_SparseCsrCPU__sparse_mask));
m.impl("_to_dense" ,
TORCH_FN(wrapper_SparseCsrCPU___to_dense));
m.impl("sparse_dim" ,
TORCH_FN(wrapper_SparseCsrCPU__sparse_dim));
m.impl("dense_dim" ,
TORCH_FN(wrapper_SparseCsrCPU__dense_dim));
m.impl("_nnz" ,
TORCH_FN(wrapper_SparseCsrCPU___nnz));
m.impl("values" ,
TORCH_FN(wrapper_SparseCsrCPU__values));
m.impl("crow_indices" ,
TORCH_FN(wrapper_SparseCsrCPU__crow_indices));
m.impl("col_indices" ,
TORCH_FN(wrapper_SparseCsrCPU__col_indices));
m.impl("ccol_indices" ,
TORCH_FN(wrapper_SparseCsrCPU__ccol_indices));
m.impl("row_indices" ,
TORCH_FN(wrapper_SparseCsrCPU__row_indices));
m.impl("to_sparse.sparse_dim" ,
TORCH_FN(wrapper_SparseCsrCPU_sparse_dim_to_sparse));
m.impl("to_sparse" ,
TORCH_FN(wrapper_SparseCsrCPU__to_sparse));
m.impl("to_sparse_csr" ,
TORCH_FN(wrapper_SparseCsrCPU__to_sparse_csr));
m.impl("to_sparse_csc" ,
TORCH_FN(wrapper_SparseCsrCPU__to_sparse_csc));
m.impl("to_sparse_bsr" ,
TORCH_FN(wrapper_SparseCsrCPU__to_sparse_bsr));
m.impl("to_sparse_bsc" ,
TORCH_FN(wrapper_SparseCsrCPU__to_sparse_bsc));
m.impl("triangular_solve.X" ,
TORCH_FN(wrapper_SparseCsrCPU_X_triangular_solve_out));
m.impl("erfinv" ,
TORCH_FN(wrapper_SparseCsrCPU__erfinv));
m.impl("erfinv.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_erfinv_out));
m.impl("erfinv_" ,
TORCH_FN(wrapper_SparseCsrCPU__erfinv_));
m.impl("sign" ,
TORCH_FN(wrapper_SparseCsrCPU__sign));
m.impl("sign.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_sign_out));
m.impl("sign_" ,
TORCH_FN(wrapper_SparseCsrCPU__sign_));
m.impl("signbit" ,
TORCH_FN(wrapper_SparseCsrCPU__signbit));
m.impl("signbit.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_signbit_out));
m.impl("normal_" ,
TORCH_FN(wrapper_SparseCsrCPU__normal_));
m.impl("isinf" ,
TORCH_FN(wrapper_SparseCsrCPU__isinf));
m.impl("isposinf" ,
TORCH_FN(wrapper_SparseCsrCPU__isposinf));
m.impl("isposinf.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_isposinf_out));
m.impl("isneginf" ,
TORCH_FN(wrapper_SparseCsrCPU__isneginf));
m.impl("isneginf.out" ,
TORCH_FN(wrapper_SparseCsrCPU_out_isneginf_out));
m.impl("select_copy.int" ,
TORCH_FN(wrapper_SparseCsrCPU_int_select_copy));
};
} // anonymous namespace
1405 | namespace sparsecsrcpu { |
1406 | at::Tensor abs(const at::Tensor & self) { |
1407 | return wrapper_SparseCsrCPU__abs(self); |
1408 | } |
1409 | at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) { |
1410 | return wrapper_SparseCsrCPU_out_abs_out(self, out); |
1411 | } |
1412 | at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) { |
1413 | return wrapper_SparseCsrCPU_out_abs_out(self, out); |
1414 | } |
1415 | at::Tensor & abs_(at::Tensor & self) { |
1416 | return wrapper_SparseCsrCPU__abs_(self); |
1417 | } |
1418 | at::Tensor angle(const at::Tensor & self) { |
1419 | return wrapper_SparseCsrCPU__angle(self); |
1420 | } |
1421 | at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self) { |
1422 | return wrapper_SparseCsrCPU_out_angle_out(self, out); |
1423 | } |
1424 | at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out) { |
1425 | return wrapper_SparseCsrCPU_out_angle_out(self, out); |
1426 | } |
1427 | at::Tensor sgn(const at::Tensor & self) { |
1428 | return wrapper_SparseCsrCPU__sgn(self); |
1429 | } |
1430 | at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) { |
1431 | return wrapper_SparseCsrCPU_out_sgn_out(self, out); |
1432 | } |
1433 | at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) { |
1434 | return wrapper_SparseCsrCPU_out_sgn_out(self, out); |
1435 | } |
1436 | at::Tensor & sgn_(at::Tensor & self) { |
1437 | return wrapper_SparseCsrCPU__sgn_(self); |
1438 | } |
1439 | at::Tensor _conj_physical(const at::Tensor & self) { |
1440 | return wrapper_SparseCsrCPU___conj_physical(self); |
1441 | } |
1442 | at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) { |
1443 | return wrapper_SparseCsrCPU_out_conj_physical_out(self, out); |
1444 | } |
1445 | at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) { |
1446 | return wrapper_SparseCsrCPU_out_conj_physical_out(self, out); |
1447 | } |
1448 | at::Tensor & conj_physical_(at::Tensor & self) { |
1449 | return wrapper_SparseCsrCPU__conj_physical_(self); |
1450 | } |
1451 | at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1452 | return wrapper_SparseCsrCPU_Tensor_add(self, other, alpha); |
1453 | } |
1454 | at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1455 | return wrapper_SparseCsrCPU_out_add_out(self, other, alpha, out); |
1456 | } |
1457 | at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
1458 | return wrapper_SparseCsrCPU_out_add_out(self, other, alpha, out); |
1459 | } |
1460 | at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1461 | return wrapper_SparseCsrCPU_Tensor_add_(self, other, alpha); |
1462 | } |
1463 | at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) { |
1464 | return wrapper_SparseCsrCPU_out_addmv_out(self, mat, vec, beta, alpha, out); |
1465 | } |
1466 | at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
1467 | return wrapper_SparseCsrCPU_out_addmv_out(self, mat, vec, beta, alpha, out); |
1468 | } |
1469 | at::Tensor asinh(const at::Tensor & self) { |
1470 | return wrapper_SparseCsrCPU__asinh(self); |
1471 | } |
1472 | at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) { |
1473 | return wrapper_SparseCsrCPU_out_asinh_out(self, out); |
1474 | } |
// Namespace-level C++ API shims for unary elementwise ops (asinh, atanh,
// asin, atan, ceil) on this dispatch key. Each op comes in up to four
// flavors: functional, `*_out` (out param first), `*_outf` (out param last),
// and in-place (trailing underscore). The `*_out`/`*_outf` pair differ only
// in argument order and normalize to the same (inputs..., out) wrapper call.
// The wrapper_SparseCsrCPU_* callees are generated earlier in this file
// (outside this view).
at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_asinh_out(self, out);
}
at::Tensor & asinh_(at::Tensor & self) {
return wrapper_SparseCsrCPU__asinh_(self);
}
at::Tensor atanh(const at::Tensor & self) {
return wrapper_SparseCsrCPU__atanh(self);
}
at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_atanh_out(self, out);
}
at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_atanh_out(self, out);
}
at::Tensor & atanh_(at::Tensor & self) {
return wrapper_SparseCsrCPU__atanh_(self);
}
at::Tensor asin(const at::Tensor & self) {
return wrapper_SparseCsrCPU__asin(self);
}
at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_asin_out(self, out);
}
at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_asin_out(self, out);
}
at::Tensor & asin_(at::Tensor & self) {
return wrapper_SparseCsrCPU__asin_(self);
}
at::Tensor atan(const at::Tensor & self) {
return wrapper_SparseCsrCPU__atan(self);
}
at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_atan_out(self, out);
}
at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_atan_out(self, out);
}
at::Tensor & atan_(at::Tensor & self) {
return wrapper_SparseCsrCPU__atan_(self);
}
at::Tensor ceil(const at::Tensor & self) {
return wrapper_SparseCsrCPU__ceil(self);
}
at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_ceil_out(self, out);
}
at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_ceil_out(self, out);
}
at::Tensor & ceil_(at::Tensor & self) {
return wrapper_SparseCsrCPU__ceil_(self);
}
// Copy, factory, and resizing shims. The `empty`/`empty_like` overloads that
// take at::TensorOptions unpack them into the individual optional components
// (dtype/layout/device/pin_memory) expected by the underlying wrapper;
// check_tensor_options_and_extract_memory_format reconciles a memory_format
// given both via options and via the explicit argument. Plain IntArrayRef
// sizes are widened to SymInt via c10::fromIntArrayRefSlow so both the
// int and symint overloads funnel into the same symint-based wrapper.
at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
return wrapper_SparseCsrCPU__copy_(self, src, non_blocking);
}
at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
}
at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
}
const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU__resize_(self, c10::fromIntArrayRefSlow(size), memory_format);
}
const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU__resize_(self, size, memory_format);
}
at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU__empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
// More unary elementwise shims (erf, expm1, floor, frac), the Scalar
// overload of fill_, and the isnan predicate. Same forwarding pattern as
// above: `*_out` and `*_outf` collapse to one wrapper call with the out
// tensor last; trailing-underscore variants mutate `self` in place.
at::Tensor erf(const at::Tensor & self) {
return wrapper_SparseCsrCPU__erf(self);
}
at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_erf_out(self, out);
}
at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_erf_out(self, out);
}
at::Tensor & erf_(at::Tensor & self) {
return wrapper_SparseCsrCPU__erf_(self);
}
at::Tensor expm1(const at::Tensor & self) {
return wrapper_SparseCsrCPU__expm1(self);
}
at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_expm1_out(self, out);
}
at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_expm1_out(self, out);
}
at::Tensor & expm1_(at::Tensor & self) {
return wrapper_SparseCsrCPU__expm1_(self);
}
at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) {
return wrapper_SparseCsrCPU_Scalar_fill_(self, value);
}
at::Tensor floor(const at::Tensor & self) {
return wrapper_SparseCsrCPU__floor(self);
}
at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_floor_out(self, out);
}
at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_floor_out(self, out);
}
at::Tensor & floor_(at::Tensor & self) {
return wrapper_SparseCsrCPU__floor_(self);
}
at::Tensor frac(const at::Tensor & self) {
return wrapper_SparseCsrCPU__frac(self);
}
at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_frac_out(self, out);
}
at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_frac_out(self, out);
}
at::Tensor & frac_(at::Tensor & self) {
return wrapper_SparseCsrCPU__frac_(self);
}
at::Tensor isnan(const at::Tensor & self) {
return wrapper_SparseCsrCPU__isnan(self);
}
// log1p (unary), mm (binary matmul), and mul shims. mul has two overload
// families, distinguished in the wrapper name by the schema overload tag:
// `Tensor` for tensor-tensor and `Scalar` for tensor-scalar; note the
// Scalar flavor has no out variant here.
at::Tensor log1p(const at::Tensor & self) {
return wrapper_SparseCsrCPU__log1p(self);
}
at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_log1p_out(self, out);
}
at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_log1p_out(self, out);
}
at::Tensor & log1p_(at::Tensor & self) {
return wrapper_SparseCsrCPU__log1p_(self);
}
at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_SparseCsrCPU__mm(self, mat2);
}
at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_SparseCsrCPU_out_mm_out(self, mat2, out);
}
at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_mm_out(self, mat2, out);
}
at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCPU_Tensor_mul(self, other);
}
at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCPU_out_mul_out(self, other, out);
}
at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_mul_out(self, other, out);
}
at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCPU_Tensor_mul_(self, other);
}
at::Tensor mul(const at::Tensor & self, const at::Scalar & other) {
return wrapper_SparseCsrCPU_Scalar_mul(self, other);
}
at::Tensor & mul_(at::Tensor & self, const at::Scalar & other) {
return wrapper_SparseCsrCPU_Scalar_mul_(self, other);
}
// Angle-conversion, negation, rounding, and relu shims, plus select.
// select/select_symint both route to the `int`-overload wrapper; the int64_t
// index of the plain overload is implicitly convertible to the wrapper's
// SymInt-accepting parameter (same pattern as empty/resize_ above).
at::Tensor rad2deg(const at::Tensor & self) {
return wrapper_SparseCsrCPU__rad2deg(self);
}
at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_rad2deg_out(self, out);
}
at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_rad2deg_out(self, out);
}
at::Tensor & rad2deg_(at::Tensor & self) {
return wrapper_SparseCsrCPU__rad2deg_(self);
}
at::Tensor deg2rad(const at::Tensor & self) {
return wrapper_SparseCsrCPU__deg2rad(self);
}
at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_deg2rad_out(self, out);
}
at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_deg2rad_out(self, out);
}
at::Tensor & deg2rad_(at::Tensor & self) {
return wrapper_SparseCsrCPU__deg2rad_(self);
}
at::Tensor neg(const at::Tensor & self) {
return wrapper_SparseCsrCPU__neg(self);
}
at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_neg_out(self, out);
}
at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_neg_out(self, out);
}
at::Tensor & neg_(at::Tensor & self) {
return wrapper_SparseCsrCPU__neg_(self);
}
at::Tensor round(const at::Tensor & self) {
return wrapper_SparseCsrCPU__round(self);
}
at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_round_out(self, out);
}
at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_round_out(self, out);
}
at::Tensor & round_(at::Tensor & self) {
return wrapper_SparseCsrCPU__round_(self);
}
at::Tensor relu(const at::Tensor & self) {
return wrapper_SparseCsrCPU__relu(self);
}
at::Tensor & relu_(at::Tensor & self) {
return wrapper_SparseCsrCPU__relu_(self);
}
at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) {
return wrapper_SparseCsrCPU_int_select(self, dim, index);
}
at::Tensor select_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
return wrapper_SparseCsrCPU_int_select(self, dim, index);
}
// sin/sinh/sqrt unary families plus the dtype-only overload of sum (full
// reduction with an optional output dtype; no out/in-place variants here).
at::Tensor sin(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sin(self);
}
at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sin_out(self, out);
}
at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sin_out(self, out);
}
at::Tensor & sin_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sin_(self);
}
at::Tensor sinh(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sinh(self);
}
at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sinh_out(self, out);
}
at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sinh_out(self, out);
}
at::Tensor & sinh_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sinh_(self);
}
at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU__sum(self, dtype);
}
at::Tensor sqrt(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sqrt(self);
}
at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sqrt_out(self, out);
}
at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sqrt_out(self, out);
}
at::Tensor & sqrt_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sqrt_(self);
}
// tan/tanh/trunc unary families and threshold_backward. Note the out
// variants of threshold_backward use `grad_input` as the output parameter
// (wrapper overload tag `grad_input`) instead of the usual `out`.
at::Tensor tan(const at::Tensor & self) {
return wrapper_SparseCsrCPU__tan(self);
}
at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_tan_out(self, out);
}
at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_tan_out(self, out);
}
at::Tensor & tan_(at::Tensor & self) {
return wrapper_SparseCsrCPU__tan_(self);
}
at::Tensor tanh(const at::Tensor & self) {
return wrapper_SparseCsrCPU__tanh(self);
}
at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_tanh_out(self, out);
}
at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_tanh_out(self, out);
}
at::Tensor & tanh_(at::Tensor & self) {
return wrapper_SparseCsrCPU__tanh_(self);
}
at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCsrCPU__threshold_backward(grad_output, self, threshold);
}
at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCsrCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
return wrapper_SparseCsrCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
at::Tensor trunc(const at::Tensor & self) {
return wrapper_SparseCsrCPU__trunc(self);
}
at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_trunc_out(self, out);
}
at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_trunc_out(self, out);
}
at::Tensor & trunc_(at::Tensor & self) {
return wrapper_SparseCsrCPU__trunc_(self);
}
// Sparse-CSR-specific reductions (dim_dtype overloads), tensor lifecycle
// helpers (clone, resize_as_sparse_, zero_), and sparse_sampled_addmm.
at::Tensor _sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum(self, dim, keepdim, dtype);
}
at::Tensor _sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod(self, dim, keepdim, dtype);
}
at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCsrCPU__clone(self, memory_format);
}
const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
return wrapper_SparseCsrCPU__resize_as_sparse_(self, the_template);
}
at::Tensor & zero_(at::Tensor & self) {
return wrapper_SparseCsrCPU__zero_(self);
}
at::Tensor sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCPU__sparse_sampled_addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & sparse_sampled_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & sparse_sampled_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out);
}
// Reduced sparse matmul (tuple-returning forward/backward pair), addmm, and
// sparse_mask shims. _sparse_mm_reduce_impl returns two tensors; its
// backward additionally takes an output_mask selecting which gradients to
// compute.
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
return wrapper_SparseCsrCPU___sparse_mm_reduce_impl(self, other, reduce);
}
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
return wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward(self, grad_out, weight, reduce, arg_out, output_mask);
}
at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCPU__addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCsrCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
return wrapper_SparseCsrCPU__sparse_mask(self, mask);
}
// Densification, dimension/nnz queries (int64_t-returning), and accessors
// for the compressed-format component tensors: values plus the row/column
// index tensors of both CSR (crow/col) and CSC (ccol/row) layouts.
at::Tensor _to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU___to_dense(self, dtype);
}
int64_t sparse_dim(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sparse_dim(self);
}
int64_t dense_dim(const at::Tensor & self) {
return wrapper_SparseCsrCPU__dense_dim(self);
}
int64_t _nnz(const at::Tensor & self) {
return wrapper_SparseCsrCPU___nnz(self);
}
at::Tensor values(const at::Tensor & self) {
return wrapper_SparseCsrCPU__values(self);
}
at::Tensor crow_indices(const at::Tensor & self) {
return wrapper_SparseCsrCPU__crow_indices(self);
}
at::Tensor col_indices(const at::Tensor & self) {
return wrapper_SparseCsrCPU__col_indices(self);
}
at::Tensor ccol_indices(const at::Tensor & self) {
return wrapper_SparseCsrCPU__ccol_indices(self);
}
at::Tensor row_indices(const at::Tensor & self) {
return wrapper_SparseCsrCPU__row_indices(self);
}
// Layout-conversion shims: the two to_sparse overloads (sparse_dim overload
// vs. explicit layout/blocksize/dense_dim overload) and the per-format
// to_sparse_{csr,csc,bsr,bsc} converters, plus triangular_solve, whose out
// variants return the (X, M) pair of output references.
at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) {
return wrapper_SparseCsrCPU_sparse_dim_to_sparse(self, sparse_dim);
}
at::Tensor to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse(self, layout, blocksize, dense_dim);
}
at::Tensor to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse_csr(self, dense_dim);
}
at::Tensor to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse_csc(self, dense_dim);
}
at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse_bsr(self, blocksize, dense_dim);
}
at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
return wrapper_SparseCsrCPU__to_sparse_bsc(self, blocksize, dense_dim);
}
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
return wrapper_SparseCsrCPU_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M);
}
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
return wrapper_SparseCsrCPU_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M);
}
// erfinv/sign unary families, the signbit predicate (no in-place variant),
// and the in-place normal_ sampler, which threads an optional Generator
// through to the wrapper.
at::Tensor erfinv(const at::Tensor & self) {
return wrapper_SparseCsrCPU__erfinv(self);
}
at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_erfinv_out(self, out);
}
at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_erfinv_out(self, out);
}
at::Tensor & erfinv_(at::Tensor & self) {
return wrapper_SparseCsrCPU__erfinv_(self);
}
at::Tensor sign(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sign(self);
}
at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sign_out(self, out);
}
at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sign_out(self, out);
}
at::Tensor & sign_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sign_(self);
}
at::Tensor signbit(const at::Tensor & self) {
return wrapper_SparseCsrCPU__signbit(self);
}
at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_signbit_out(self, out);
}
at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_signbit_out(self, out);
}
at::Tensor & normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
return wrapper_SparseCsrCPU__normal_(self, mean, std, generator);
}
// Infinity predicates (isinf has no out variant; isposinf/isneginf do) and
// select_copy. As with select above, the int64_t and SymInt index overloads
// of select_copy share the `int`-overload wrapper.
at::Tensor isinf(const at::Tensor & self) {
return wrapper_SparseCsrCPU__isinf(self);
}
at::Tensor isposinf(const at::Tensor & self) {
return wrapper_SparseCsrCPU__isposinf(self);
}
at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_isposinf_out(self, out);
}
at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_isposinf_out(self, out);
}
at::Tensor isneginf(const at::Tensor & self) {
return wrapper_SparseCsrCPU__isneginf(self);
}
at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_isneginf_out(self, out);
}
at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_isneginf_out(self, out);
}
at::Tensor select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
return wrapper_SparseCsrCPU_int_select_copy(self, dim, index);
}
at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
return wrapper_SparseCsrCPU_int_select_copy(self, dim, index);
}
1949 | } // namespace sparsecsrcpu |
1950 | } // namespace at |
1951 | |