1 | // required for old g++ to compile PRId64 macros, see |
2 | // https://github.com/pytorch/pytorch/issues/3571 |
3 | // for context |
4 | #ifndef __STDC_FORMAT_MACROS |
5 | #define __STDC_FORMAT_MACROS |
6 | #endif |
7 | |
8 | // an external backend might generate file within its code tree |
9 | // and check all the source files within the tree with clang-format. |
10 | // so, disable it since the backend might have a different config. |
11 | // clang-format off |
12 | |
13 | // NOTE: This condition is true for all PyTorch internal libraries, it |
14 | // just excludes external projects such as torch_xla which |
15 | // re-use some of the PyTorch codegen machinery. |
16 | #if defined(CAFFE2_BUILD_MAIN_LIB) || \ |
17 | defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ |
18 | defined(TORCH_HIP_BUILD_MAIN_LIB) || \ |
19 | defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ |
20 | defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) |
21 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
22 | #endif |
23 | |
24 | // @generated by torchgen/gen.py from RegisterDispatchKey.cpp |
25 | |
26 | #include <c10/core/TensorImpl.h> |
27 | #include <c10/core/Allocator.h> |
28 | #include <ATen/DeviceGuard.h> |
29 | #include <ATen/NamedTensorUtils.h> |
30 | #include <ATen/Utils.h> |
31 | #include <ATen/WrapDimUtils.h> |
32 | #include <ATen/Dispatch.h> |
33 | #include <c10/util/ExclusivelyOwned.h> |
34 | #include <c10/util/Half.h> |
35 | #include <c10/core/UndefinedTensorImpl.h> |
36 | #include <c10/util/Optional.h> |
37 | #include <ATen/Tensor.h> |
38 | #include <ATen/native/Resize.h> |
39 | |
40 | #include <cstddef> |
41 | #include <functional> |
42 | #include <memory> |
43 | #include <utility> |
44 | |
45 | #include <ATen/Config.h> |
46 | #include <ATen/core/op_registration/adaption.h> |
47 | #include <torch/library.h> |
48 | #include <c10/cuda/CUDAGuard.h> |
49 | #include <ATen/cuda/ATenCUDAGeneral.h> |
50 | #include <ATen/cuda/CUDADevice.h> |
51 | #include <ATen/cuda/CUDAContext.h> |
52 | |
53 | #include <ATen/ops/as_strided_native.h> |
54 | #include <ATen/ops/empty.h> |
55 | #include <ATen/ops/empty_strided.h> |
56 | #include <ATen/ops/_copy_from_and_resize.h> |
57 | #include <ATen/ops/_copy_from.h> |
58 | #include <ATen/ops/_coalesce_native.h> |
59 | #include <ATen/ops/_coalesced_native.h> |
60 | #include <ATen/ops/_dimI_native.h> |
61 | #include <ATen/ops/_dimV_native.h> |
62 | #include <ATen/ops/_indices_native.h> |
63 | #include <ATen/ops/_nnz_native.h> |
64 | #include <ATen/ops/_sparse_broadcast_to_native.h> |
65 | #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h> |
66 | #include <ATen/ops/_sparse_coo_tensor_with_dims_native.h> |
67 | #include <ATen/ops/_sparse_log_softmax_backward_data_native.h> |
68 | #include <ATen/ops/_sparse_log_softmax_native.h> |
69 | #include <ATen/ops/_sparse_softmax_backward_data_native.h> |
70 | #include <ATen/ops/_sparse_softmax_native.h> |
71 | #include <ATen/ops/_sparse_sparse_matmul_native.h> |
72 | #include <ATen/ops/_sparse_sum_backward_native.h> |
73 | #include <ATen/ops/_to_dense_native.h> |
74 | #include <ATen/ops/_values_native.h> |
75 | #include <ATen/ops/abs_native.h> |
76 | #include <ATen/ops/add_native.h> |
77 | #include <ATen/ops/addmm_native.h> |
78 | #include <ATen/ops/any_native.h> |
79 | #include <ATen/ops/asin_native.h> |
80 | #include <ATen/ops/asinh_native.h> |
81 | #include <ATen/ops/atan_native.h> |
82 | #include <ATen/ops/atanh_native.h> |
83 | #include <ATen/ops/bmm_native.h> |
84 | #include <ATen/ops/cat_native.h> |
85 | #include <ATen/ops/ceil_native.h> |
86 | #include <ATen/ops/clone_native.h> |
87 | #include <ATen/ops/conj_physical_native.h> |
88 | #include <ATen/ops/copy_native.h> |
89 | #include <ATen/ops/copy_sparse_to_sparse_native.h> |
90 | #include <ATen/ops/deg2rad_native.h> |
91 | #include <ATen/ops/dense_dim_native.h> |
92 | #include <ATen/ops/div_native.h> |
93 | #include <ATen/ops/empty_like_native.h> |
94 | #include <ATen/ops/empty_native.h> |
95 | #include <ATen/ops/erf_native.h> |
96 | #include <ATen/ops/erfinv_native.h> |
97 | #include <ATen/ops/expm1_native.h> |
98 | #include <ATen/ops/floor_divide_native.h> |
99 | #include <ATen/ops/floor_native.h> |
100 | #include <ATen/ops/frac_native.h> |
101 | #include <ATen/ops/hspmm_native.h> |
102 | #include <ATen/ops/index_select_native.h> |
103 | #include <ATen/ops/indices_native.h> |
104 | #include <ATen/ops/is_coalesced_native.h> |
105 | #include <ATen/ops/isinf_native.h> |
106 | #include <ATen/ops/isnan_native.h> |
107 | #include <ATen/ops/isneginf_native.h> |
108 | #include <ATen/ops/isposinf_native.h> |
109 | #include <ATen/ops/log1p_native.h> |
110 | #include <ATen/ops/mm_native.h> |
111 | #include <ATen/ops/mul_native.h> |
112 | #include <ATen/ops/mv_native.h> |
113 | #include <ATen/ops/nan_to_num_native.h> |
114 | #include <ATen/ops/narrow_copy_native.h> |
115 | #include <ATen/ops/native_norm_native.h> |
116 | #include <ATen/ops/neg_native.h> |
117 | #include <ATen/ops/norm_native.h> |
118 | #include <ATen/ops/permute_native.h> |
119 | #include <ATen/ops/pow_native.h> |
120 | #include <ATen/ops/rad2deg_native.h> |
121 | #include <ATen/ops/relu_native.h> |
122 | #include <ATen/ops/resize_as_sparse_native.h> |
123 | #include <ATen/ops/round_native.h> |
124 | #include <ATen/ops/sgn_native.h> |
125 | #include <ATen/ops/sign_native.h> |
126 | #include <ATen/ops/signbit_native.h> |
127 | #include <ATen/ops/sin_native.h> |
128 | #include <ATen/ops/sinh_native.h> |
129 | #include <ATen/ops/sparse_dim_native.h> |
130 | #include <ATen/ops/sparse_mask_native.h> |
131 | #include <ATen/ops/sparse_resize_and_clear_native.h> |
132 | #include <ATen/ops/sparse_resize_native.h> |
133 | #include <ATen/ops/sqrt_native.h> |
134 | #include <ATen/ops/sspaddmm_native.h> |
135 | #include <ATen/ops/sub_native.h> |
136 | #include <ATen/ops/sum_native.h> |
137 | #include <ATen/ops/tan_native.h> |
138 | #include <ATen/ops/tanh_native.h> |
139 | #include <ATen/ops/threshold_backward_native.h> |
140 | #include <ATen/ops/to_sparse_bsc_native.h> |
141 | #include <ATen/ops/to_sparse_bsr_native.h> |
142 | #include <ATen/ops/to_sparse_csc_native.h> |
143 | #include <ATen/ops/to_sparse_csr_native.h> |
144 | #include <ATen/ops/to_sparse_native.h> |
145 | #include <ATen/ops/trunc_native.h> |
146 | #include <ATen/ops/unsqueeze_native.h> |
147 | #include <ATen/ops/values_native.h> |
148 | #include <ATen/ops/zero_native.h> |
149 | #include <ATen/ops/zeros_native.h> |
150 | |
151 | // See template file RegisterDispatchDefinitions.ini |
152 | namespace at { |
153 | // NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid |
154 | // ambiguity with conflicting identifiers that may have been defined in |
155 | // at namespace already. |
156 | namespace { |
157 | void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) { |
158 | TORCH_CHECK(options.dtype() == out.dtype(), |
159 | "Expected out tensor to have dtype " , options.dtype(), ", but got " , out.dtype(), " instead" ); |
160 | TORCH_CHECK(options.device() == out.device(), |
161 | "Expected out tensor to have device " , options.device(), ", but got " , out.device(), " instead" ); |
162 | const bool resized = at::native::resize_output(out, sizes); |
163 | // Only restride if a resize occurred; otherwise we ignore the (advisory) |
164 | // strides from the meta function and directly use the output tensor's |
165 | // preexisting strides |
166 | if (resized) { |
167 | if (!strides.empty()) { |
168 | TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value()); |
169 | // TODO: avoid the redispatch here |
170 | out.as_strided_(sizes, strides); |
171 | } else if (options.memory_format_opt().has_value()) { |
172 | out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt()); |
173 | } |
174 | } |
175 | } |
176 | void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) { |
177 | // These checks are needed on those operators that: |
178 | // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm') |
179 | // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod') |
180 | // For other operators (e.g. 'add'), 'TensorIterator' already checks |
181 | // these things separately. |
182 | TORCH_CHECK(options.dtype() == self.dtype(), |
183 | "Bad in-place call: " , |
184 | "input tensor dtype " , self.dtype(), " and output tensor dtype " , options.dtype(), " should match" ); |
185 | TORCH_CHECK(options.device() == self.device(), |
186 | "Bad in-place call: " , |
187 | "input tensor device " , self.device(), " and output tensor device " , options.device(), " should match" ); |
188 | TORCH_CHECK(sizes == self.sizes(), |
189 | "Bad in-place call: " , |
190 | "input tensor size " , self.sizes(), " and output tensor size " , sizes, " should match" ); |
191 | } |
192 | namespace { |
193 | at::Tensor wrapper_SparseCUDA__abs(const at::Tensor & self) { |
194 | // No device check |
195 | const OptionalDeviceGuard device_guard(device_of(self)); |
196 | return at::native::abs_sparse(self); |
197 | } |
198 | } // anonymous namespace |
199 | namespace { |
200 | at::Tensor & wrapper_SparseCUDA_out_abs_out(const at::Tensor & self, at::Tensor & out) { |
201 | // No device check |
202 | const OptionalDeviceGuard device_guard(device_of(self)); |
203 | return at::native::abs_sparse_out(self, out); |
204 | } |
205 | } // anonymous namespace |
206 | namespace { |
207 | at::Tensor & wrapper_SparseCUDA__abs_(at::Tensor & self) { |
208 | // No device check |
209 | const OptionalDeviceGuard device_guard(device_of(self)); |
210 | return at::native::abs_sparse_(self); |
211 | } |
212 | } // anonymous namespace |
213 | namespace { |
214 | at::Tensor wrapper_SparseCUDA__sgn(const at::Tensor & self) { |
215 | c10::optional<Device> common_device = nullopt; |
216 | (void)common_device; // Suppress unused variable warning |
217 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sgn" , "self" ); |
218 | const OptionalDeviceGuard device_guard(device_of(self)); |
219 | return at::native::sgn_sparse(self); |
220 | } |
221 | } // anonymous namespace |
222 | namespace { |
223 | at::Tensor & wrapper_SparseCUDA_out_sgn_out(const at::Tensor & self, at::Tensor & out) { |
224 | c10::optional<Device> common_device = nullopt; |
225 | (void)common_device; // Suppress unused variable warning |
226 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_sgn_out" , "out" ); |
227 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_sgn_out" , "self" ); |
228 | const OptionalDeviceGuard device_guard(device_of(self)); |
229 | return at::native::sgn_sparse_out(self, out); |
230 | } |
231 | } // anonymous namespace |
232 | namespace { |
233 | at::Tensor & wrapper_SparseCUDA__sgn_(at::Tensor & self) { |
234 | c10::optional<Device> common_device = nullopt; |
235 | (void)common_device; // Suppress unused variable warning |
236 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sgn_" , "self" ); |
237 | const OptionalDeviceGuard device_guard(device_of(self)); |
238 | return at::native::sgn_sparse_(self); |
239 | } |
240 | } // anonymous namespace |
241 | namespace { |
242 | at::Tensor & wrapper_SparseCUDA_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) { |
243 | c10::optional<Device> common_device = nullopt; |
244 | (void)common_device; // Suppress unused variable warning |
245 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_conj_physical_out" , "out" ); |
246 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_conj_physical_out" , "self" ); |
247 | const OptionalDeviceGuard device_guard(device_of(self)); |
248 | return at::native::conj_physical_out_sparse(self, out); |
249 | } |
250 | } // anonymous namespace |
251 | namespace { |
252 | at::Tensor wrapper_SparseCUDA_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
253 | // No device check |
254 | const OptionalDeviceGuard device_guard(device_of(self)); |
255 | return at::native::add_sparse(self, other, alpha); |
256 | } |
257 | } // anonymous namespace |
258 | namespace { |
259 | at::Tensor & wrapper_SparseCUDA_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
260 | // No device check |
261 | const OptionalDeviceGuard device_guard(device_of(self)); |
262 | return at::native::add_out_sparse_cuda(self, other, alpha, out); |
263 | } |
264 | } // anonymous namespace |
265 | namespace { |
266 | at::Tensor & wrapper_SparseCUDA_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
267 | // No device check |
268 | const OptionalDeviceGuard device_guard(device_of(self)); |
269 | return at::native::add_sparse_(self, other, alpha); |
270 | } |
271 | } // anonymous namespace |
272 | namespace { |
273 | at::Tensor wrapper_SparseCUDA__asinh(const at::Tensor & self) { |
274 | c10::optional<Device> common_device = nullopt; |
275 | (void)common_device; // Suppress unused variable warning |
276 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__asinh" , "self" ); |
277 | const OptionalDeviceGuard device_guard(device_of(self)); |
278 | return at::native::asinh_sparse(self); |
279 | } |
280 | } // anonymous namespace |
281 | namespace { |
282 | at::Tensor & wrapper_SparseCUDA_out_asinh_out(const at::Tensor & self, at::Tensor & out) { |
283 | c10::optional<Device> common_device = nullopt; |
284 | (void)common_device; // Suppress unused variable warning |
285 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_asinh_out" , "out" ); |
286 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_asinh_out" , "self" ); |
287 | const OptionalDeviceGuard device_guard(device_of(self)); |
288 | return at::native::asinh_sparse_out(self, out); |
289 | } |
290 | } // anonymous namespace |
291 | namespace { |
292 | at::Tensor & wrapper_SparseCUDA__asinh_(at::Tensor & self) { |
293 | c10::optional<Device> common_device = nullopt; |
294 | (void)common_device; // Suppress unused variable warning |
295 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__asinh_" , "self" ); |
296 | const OptionalDeviceGuard device_guard(device_of(self)); |
297 | return at::native::asinh_sparse_(self); |
298 | } |
299 | } // anonymous namespace |
300 | namespace { |
301 | at::Tensor wrapper_SparseCUDA__atanh(const at::Tensor & self) { |
302 | c10::optional<Device> common_device = nullopt; |
303 | (void)common_device; // Suppress unused variable warning |
304 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__atanh" , "self" ); |
305 | const OptionalDeviceGuard device_guard(device_of(self)); |
306 | return at::native::atanh_sparse(self); |
307 | } |
308 | } // anonymous namespace |
309 | namespace { |
310 | at::Tensor & wrapper_SparseCUDA_out_atanh_out(const at::Tensor & self, at::Tensor & out) { |
311 | c10::optional<Device> common_device = nullopt; |
312 | (void)common_device; // Suppress unused variable warning |
313 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_atanh_out" , "out" ); |
314 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_atanh_out" , "self" ); |
315 | const OptionalDeviceGuard device_guard(device_of(self)); |
316 | return at::native::atanh_sparse_out(self, out); |
317 | } |
318 | } // anonymous namespace |
319 | namespace { |
320 | at::Tensor & wrapper_SparseCUDA__atanh_(at::Tensor & self) { |
321 | c10::optional<Device> common_device = nullopt; |
322 | (void)common_device; // Suppress unused variable warning |
323 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__atanh_" , "self" ); |
324 | const OptionalDeviceGuard device_guard(device_of(self)); |
325 | return at::native::atanh_sparse_(self); |
326 | } |
327 | } // anonymous namespace |
328 | namespace { |
329 | at::Tensor wrapper_SparseCUDA__asin(const at::Tensor & self) { |
330 | // No device check |
331 | const OptionalDeviceGuard device_guard(device_of(self)); |
332 | return at::native::asin_sparse(self); |
333 | } |
334 | } // anonymous namespace |
335 | namespace { |
336 | at::Tensor & wrapper_SparseCUDA_out_asin_out(const at::Tensor & self, at::Tensor & out) { |
337 | // No device check |
338 | const OptionalDeviceGuard device_guard(device_of(self)); |
339 | return at::native::asin_sparse_out(self, out); |
340 | } |
341 | } // anonymous namespace |
342 | namespace { |
343 | at::Tensor & wrapper_SparseCUDA__asin_(at::Tensor & self) { |
344 | // No device check |
345 | const OptionalDeviceGuard device_guard(device_of(self)); |
346 | return at::native::asin_sparse_(self); |
347 | } |
348 | } // anonymous namespace |
349 | namespace { |
350 | at::Tensor wrapper_SparseCUDA__atan(const at::Tensor & self) { |
351 | // No device check |
352 | const OptionalDeviceGuard device_guard(device_of(self)); |
353 | return at::native::atan_sparse(self); |
354 | } |
355 | } // anonymous namespace |
356 | namespace { |
357 | at::Tensor & wrapper_SparseCUDA_out_atan_out(const at::Tensor & self, at::Tensor & out) { |
358 | // No device check |
359 | const OptionalDeviceGuard device_guard(device_of(self)); |
360 | return at::native::atan_sparse_out(self, out); |
361 | } |
362 | } // anonymous namespace |
363 | namespace { |
364 | at::Tensor & wrapper_SparseCUDA__atan_(at::Tensor & self) { |
365 | // No device check |
366 | const OptionalDeviceGuard device_guard(device_of(self)); |
367 | return at::native::atan_sparse_(self); |
368 | } |
369 | } // anonymous namespace |
370 | namespace { |
371 | at::Tensor wrapper_SparseCUDA__bmm(const at::Tensor & self, const at::Tensor & mat2) { |
372 | c10::optional<Device> common_device = nullopt; |
373 | (void)common_device; // Suppress unused variable warning |
374 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__bmm" , "self" ); |
375 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__bmm" , "mat2" ); |
376 | const OptionalDeviceGuard device_guard(device_of(self)); |
377 | return at::native::bmm_sparse_cuda(self, mat2); |
378 | } |
379 | } // anonymous namespace |
380 | namespace { |
381 | at::Tensor & wrapper_SparseCUDA_out_bmm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
382 | c10::optional<Device> common_device = nullopt; |
383 | (void)common_device; // Suppress unused variable warning |
384 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_bmm_out" , "out" ); |
385 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_bmm_out" , "self" ); |
386 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_bmm_out" , "mat2" ); |
387 | const OptionalDeviceGuard device_guard(device_of(self)); |
388 | return at::native::bmm_out_sparse_cuda(self, mat2, out); |
389 | } |
390 | } // anonymous namespace |
391 | namespace { |
392 | at::Tensor wrapper_SparseCUDA___sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) { |
393 | c10::optional<Device> common_device = nullopt; |
394 | (void)common_device; // Suppress unused variable warning |
395 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_broadcast_to" , "self" ); |
396 | const OptionalDeviceGuard device_guard(device_of(self)); |
397 | return at::native::sparse_broadcast_to(self, size); |
398 | } |
399 | } // anonymous namespace |
400 | namespace { |
401 | at::Tensor wrapper_SparseCUDA__cat(const at::ITensorListRef & tensors, int64_t dim) { |
402 | c10::optional<Device> common_device = nullopt; |
403 | (void)common_device; // Suppress unused variable warning |
404 | c10::impl::check_and_update_common_device(common_device, tensors, "wrapper_SparseCUDA__cat" , "tensors" ); |
405 | const OptionalDeviceGuard device_guard(device_of(tensors)); |
406 | return at::native::cat_sparse(tensors, dim); |
407 | } |
408 | } // anonymous namespace |
409 | namespace { |
410 | at::Tensor wrapper_SparseCUDA__ceil(const at::Tensor & self) { |
411 | // No device check |
412 | const OptionalDeviceGuard device_guard(device_of(self)); |
413 | return at::native::ceil_sparse(self); |
414 | } |
415 | } // anonymous namespace |
416 | namespace { |
417 | at::Tensor & wrapper_SparseCUDA_out_ceil_out(const at::Tensor & self, at::Tensor & out) { |
418 | // No device check |
419 | const OptionalDeviceGuard device_guard(device_of(self)); |
420 | return at::native::ceil_sparse_out(self, out); |
421 | } |
422 | } // anonymous namespace |
423 | namespace { |
424 | at::Tensor & wrapper_SparseCUDA__ceil_(at::Tensor & self) { |
425 | // No device check |
426 | const OptionalDeviceGuard device_guard(device_of(self)); |
427 | return at::native::ceil_sparse_(self); |
428 | } |
429 | } // anonymous namespace |
430 | namespace { |
431 | at::Tensor & wrapper_SparseCUDA__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) { |
432 | // No device check |
433 | // DeviceGuard omitted |
434 | return at::native::copy_sparse_wrapper_(self, src, non_blocking); |
435 | } |
436 | } // anonymous namespace |
437 | namespace { |
438 | at::Tensor wrapper_SparseCUDA_Tensor_div(const at::Tensor & self, const at::Tensor & other) { |
439 | // No device check |
440 | const OptionalDeviceGuard device_guard(device_of(self)); |
441 | return at::native::div_sparse(self, other); |
442 | } |
443 | } // anonymous namespace |
444 | namespace { |
445 | at::Tensor & wrapper_SparseCUDA_out_div_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
446 | // No device check |
447 | const OptionalDeviceGuard device_guard(device_of(self)); |
448 | return at::native::div_out_sparse_zerodim(self, other, out); |
449 | } |
450 | } // anonymous namespace |
451 | namespace { |
452 | at::Tensor & wrapper_SparseCUDA_Tensor_div_(at::Tensor & self, const at::Tensor & other) { |
453 | // No device check |
454 | const OptionalDeviceGuard device_guard(device_of(self)); |
455 | return at::native::div_sparse_(self, other); |
456 | } |
457 | } // anonymous namespace |
458 | namespace { |
459 | at::Tensor wrapper_SparseCUDA_Tensor_mode_div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { |
460 | // No device check |
461 | const OptionalDeviceGuard device_guard(device_of(self)); |
462 | return at::native::div_sparse(self, other, rounding_mode); |
463 | } |
464 | } // anonymous namespace |
465 | namespace { |
466 | at::Tensor & wrapper_SparseCUDA_out_mode_div_out(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) { |
467 | // No device check |
468 | const OptionalDeviceGuard device_guard(device_of(self)); |
469 | return at::native::div_out_sparse_zerodim(self, other, rounding_mode, out); |
470 | } |
471 | } // anonymous namespace |
472 | namespace { |
473 | at::Tensor & wrapper_SparseCUDA_Tensor_mode_div_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { |
474 | // No device check |
475 | const OptionalDeviceGuard device_guard(device_of(self)); |
476 | return at::native::div_sparse_(self, other, rounding_mode); |
477 | } |
478 | } // anonymous namespace |
479 | namespace { |
480 | at::Tensor wrapper_SparseCUDA_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
481 | c10::optional<Device> common_device = nullopt; |
482 | (void)common_device; // Suppress unused variable warning |
483 | globalContext().lazyInitCUDA(); |
484 | const DeviceGuard device_guard(device_or_default(device)); |
485 | return at::native::empty_sparse(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format); |
486 | } |
487 | } // anonymous namespace |
488 | namespace { |
489 | at::Tensor wrapper_SparseCUDA__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
490 | // No device check |
491 | // DeviceGuard omitted |
492 | return at::native::empty_like_sparse_coo(self, dtype, layout, device, pin_memory, memory_format); |
493 | } |
494 | } // anonymous namespace |
495 | namespace { |
496 | at::Tensor wrapper_SparseCUDA__erf(const at::Tensor & self) { |
497 | // No device check |
498 | const OptionalDeviceGuard device_guard(device_of(self)); |
499 | return at::native::erf_sparse(self); |
500 | } |
501 | } // anonymous namespace |
502 | namespace { |
503 | at::Tensor & wrapper_SparseCUDA_out_erf_out(const at::Tensor & self, at::Tensor & out) { |
504 | // No device check |
505 | const OptionalDeviceGuard device_guard(device_of(self)); |
506 | return at::native::erf_sparse_out(self, out); |
507 | } |
508 | } // anonymous namespace |
509 | namespace { |
510 | at::Tensor & wrapper_SparseCUDA__erf_(at::Tensor & self) { |
511 | // No device check |
512 | const OptionalDeviceGuard device_guard(device_of(self)); |
513 | return at::native::erf_sparse_(self); |
514 | } |
515 | } // anonymous namespace |
516 | namespace { |
517 | at::Tensor wrapper_SparseCUDA__expm1(const at::Tensor & self) { |
518 | // No device check |
519 | const OptionalDeviceGuard device_guard(device_of(self)); |
520 | return at::native::expm1_sparse(self); |
521 | } |
522 | } // anonymous namespace |
523 | namespace { |
524 | at::Tensor & wrapper_SparseCUDA_out_expm1_out(const at::Tensor & self, at::Tensor & out) { |
525 | // No device check |
526 | const OptionalDeviceGuard device_guard(device_of(self)); |
527 | return at::native::expm1_sparse_out(self, out); |
528 | } |
529 | } // anonymous namespace |
530 | namespace { |
531 | at::Tensor & wrapper_SparseCUDA__expm1_(at::Tensor & self) { |
532 | // No device check |
533 | const OptionalDeviceGuard device_guard(device_of(self)); |
534 | return at::native::expm1_sparse_(self); |
535 | } |
536 | } // anonymous namespace |
537 | namespace { |
538 | at::Tensor wrapper_SparseCUDA__floor(const at::Tensor & self) { |
539 | // No device check |
540 | const OptionalDeviceGuard device_guard(device_of(self)); |
541 | return at::native::floor_sparse(self); |
542 | } |
543 | } // anonymous namespace |
544 | namespace { |
545 | at::Tensor & wrapper_SparseCUDA_out_floor_out(const at::Tensor & self, at::Tensor & out) { |
546 | // No device check |
547 | const OptionalDeviceGuard device_guard(device_of(self)); |
548 | return at::native::floor_sparse_out(self, out); |
549 | } |
550 | } // anonymous namespace |
551 | namespace { |
552 | at::Tensor & wrapper_SparseCUDA__floor_(at::Tensor & self) { |
553 | // No device check |
554 | const OptionalDeviceGuard device_guard(device_of(self)); |
555 | return at::native::floor_sparse_(self); |
556 | } |
557 | } // anonymous namespace |
558 | namespace { |
559 | at::Tensor wrapper_SparseCUDA__floor_divide(const at::Tensor & self, const at::Tensor & other) { |
560 | // No device check |
561 | const OptionalDeviceGuard device_guard(device_of(self)); |
562 | return at::native::floor_divide_sparse(self, other); |
563 | } |
564 | } // anonymous namespace |
565 | namespace { |
566 | at::Tensor & wrapper_SparseCUDA_out_floor_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
567 | // No device check |
568 | const OptionalDeviceGuard device_guard(device_of(self)); |
569 | return at::native::floor_divide_out_sparse_zerodim(self, other, out); |
570 | } |
571 | } // anonymous namespace |
572 | namespace { |
573 | at::Tensor & wrapper_SparseCUDA_Tensor_floor_divide_(at::Tensor & self, const at::Tensor & other) { |
574 | // No device check |
575 | const OptionalDeviceGuard device_guard(device_of(self)); |
576 | return at::native::floor_divide_sparse_(self, other); |
577 | } |
578 | } // anonymous namespace |
579 | namespace { |
580 | at::Tensor wrapper_SparseCUDA__frac(const at::Tensor & self) { |
581 | // No device check |
582 | const OptionalDeviceGuard device_guard(device_of(self)); |
583 | return at::native::frac_sparse(self); |
584 | } |
585 | } // anonymous namespace |
586 | namespace { |
587 | at::Tensor & wrapper_SparseCUDA_out_frac_out(const at::Tensor & self, at::Tensor & out) { |
588 | // No device check |
589 | const OptionalDeviceGuard device_guard(device_of(self)); |
590 | return at::native::frac_sparse_out(self, out); |
591 | } |
592 | } // anonymous namespace |
593 | namespace { |
594 | at::Tensor & wrapper_SparseCUDA__frac_(at::Tensor & self) { |
595 | // No device check |
596 | const OptionalDeviceGuard device_guard(device_of(self)); |
597 | return at::native::frac_sparse_(self); |
598 | } |
599 | } // anonymous namespace |
600 | namespace { |
601 | at::Tensor wrapper_SparseCUDA__isnan(const at::Tensor & self) { |
602 | // No device check |
603 | // DeviceGuard omitted |
604 | return at::native::isnan_sparse(self); |
605 | } |
606 | } // anonymous namespace |
607 | namespace { |
608 | at::Tensor wrapper_SparseCUDA__nan_to_num(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) { |
609 | c10::optional<Device> common_device = nullopt; |
610 | (void)common_device; // Suppress unused variable warning |
611 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__nan_to_num" , "self" ); |
612 | const OptionalDeviceGuard device_guard(device_of(self)); |
613 | return at::native::nan_to_num_sparse(self, nan, posinf, neginf); |
614 | } |
615 | } // anonymous namespace |
616 | namespace { |
617 | at::Tensor & wrapper_SparseCUDA_out_nan_to_num_out(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) { |
618 | c10::optional<Device> common_device = nullopt; |
619 | (void)common_device; // Suppress unused variable warning |
620 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_nan_to_num_out" , "out" ); |
621 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_nan_to_num_out" , "self" ); |
622 | const OptionalDeviceGuard device_guard(device_of(self)); |
623 | return at::native::nan_to_num_sparse_out(self, nan, posinf, neginf, out); |
624 | } |
625 | } // anonymous namespace |
626 | namespace { |
627 | at::Tensor & wrapper_SparseCUDA__nan_to_num_(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) { |
628 | c10::optional<Device> common_device = nullopt; |
629 | (void)common_device; // Suppress unused variable warning |
630 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__nan_to_num_" , "self" ); |
631 | const OptionalDeviceGuard device_guard(device_of(self)); |
632 | return at::native::nan_to_num_sparse_(self, nan, posinf, neginf); |
633 | } |
634 | } // anonymous namespace |
635 | namespace { |
636 | at::Tensor wrapper_SparseCUDA__log1p(const at::Tensor & self) { |
637 | // No device check |
638 | const OptionalDeviceGuard device_guard(device_of(self)); |
639 | return at::native::log1p_sparse(self); |
640 | } |
641 | } // anonymous namespace |
642 | namespace { |
643 | at::Tensor & wrapper_SparseCUDA_out_log1p_out(const at::Tensor & self, at::Tensor & out) { |
644 | // No device check |
645 | const OptionalDeviceGuard device_guard(device_of(self)); |
646 | return at::native::log1p_sparse_out(self, out); |
647 | } |
648 | } // anonymous namespace |
649 | namespace { |
650 | at::Tensor & wrapper_SparseCUDA__log1p_(at::Tensor & self) { |
651 | // No device check |
652 | const OptionalDeviceGuard device_guard(device_of(self)); |
653 | return at::native::log1p_sparse_(self); |
654 | } |
655 | } // anonymous namespace |
656 | namespace { |
657 | at::Tensor wrapper_SparseCUDA__mm(const at::Tensor & self, const at::Tensor & mat2) { |
658 | c10::optional<Device> common_device = nullopt; |
659 | (void)common_device; // Suppress unused variable warning |
660 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__mm" , "self" ); |
661 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__mm" , "mat2" ); |
662 | const OptionalDeviceGuard device_guard(device_of(self)); |
663 | return at::native::_sparse_mm(self, mat2); |
664 | } |
665 | } // anonymous namespace |
666 | namespace { |
667 | at::Tensor & wrapper_SparseCUDA_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
668 | c10::optional<Device> common_device = nullopt; |
669 | (void)common_device; // Suppress unused variable warning |
670 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_mm_out" , "out" ); |
671 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_mm_out" , "self" ); |
672 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_mm_out" , "mat2" ); |
673 | const OptionalDeviceGuard device_guard(device_of(self)); |
674 | return at::native::_sparse_mm_out(self, mat2, out); |
675 | } |
676 | } // anonymous namespace |
677 | namespace { |
678 | at::Tensor wrapper_SparseCUDA___sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) { |
679 | c10::optional<Device> common_device = nullopt; |
680 | (void)common_device; // Suppress unused variable warning |
681 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_sparse_matmul" , "self" ); |
682 | c10::impl::check_and_update_common_device(common_device, other, "wrapper_SparseCUDA___sparse_sparse_matmul" , "other" ); |
683 | const OptionalDeviceGuard device_guard(device_of(self)); |
684 | return at::native::sparse_sparse_matmul_cuda(self, other); |
685 | } |
686 | } // anonymous namespace |
687 | namespace { |
688 | at::Tensor wrapper_SparseCUDA_Tensor_mul(const at::Tensor & self, const at::Tensor & other) { |
689 | // No device check |
690 | const OptionalDeviceGuard device_guard(device_of(self)); |
691 | return at::native::mul_sparse(self, other); |
692 | } |
693 | } // anonymous namespace |
694 | namespace { |
695 | at::Tensor & wrapper_SparseCUDA_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
696 | // No device check |
697 | const OptionalDeviceGuard device_guard(device_of(self)); |
698 | return at::native::mul_out_sparse_cuda(self, other, out); |
699 | } |
700 | } // anonymous namespace |
701 | namespace { |
702 | at::Tensor & wrapper_SparseCUDA_Tensor_mul_(at::Tensor & self, const at::Tensor & other) { |
703 | // No device check |
704 | const OptionalDeviceGuard device_guard(device_of(self)); |
705 | return at::native::mul_sparse_(self, other); |
706 | } |
707 | } // anonymous namespace |
708 | namespace { |
709 | at::Tensor wrapper_SparseCUDA__mv(const at::Tensor & self, const at::Tensor & vec) { |
710 | c10::optional<Device> common_device = nullopt; |
711 | (void)common_device; // Suppress unused variable warning |
712 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__mv" , "self" ); |
713 | c10::impl::check_and_update_common_device(common_device, vec, "wrapper_SparseCUDA__mv" , "vec" ); |
714 | const OptionalDeviceGuard device_guard(device_of(self)); |
715 | return at::native::mv_sparse(self, vec); |
716 | } |
717 | } // anonymous namespace |
718 | namespace { |
719 | at::Tensor wrapper_SparseCUDA__narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { |
720 | c10::optional<Device> common_device = nullopt; |
721 | (void)common_device; // Suppress unused variable warning |
722 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__narrow_copy" , "self" ); |
723 | const OptionalDeviceGuard device_guard(device_of(self)); |
724 | return at::native::narrow_copy_sparse(self, dim, start.expect_int(), length.expect_int()); |
725 | } |
726 | } // anonymous namespace |
727 | namespace { |
728 | at::Tensor wrapper_SparseCUDA__permute(const at::Tensor & self, at::IntArrayRef dims) { |
729 | c10::optional<Device> common_device = nullopt; |
730 | (void)common_device; // Suppress unused variable warning |
731 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__permute" , "self" ); |
732 | const OptionalDeviceGuard device_guard(device_of(self)); |
733 | return at::native::permute_sparse_coo(self, dims); |
734 | } |
735 | } // anonymous namespace |
736 | namespace { |
737 | at::Tensor wrapper_SparseCUDA__rad2deg(const at::Tensor & self) { |
738 | c10::optional<Device> common_device = nullopt; |
739 | (void)common_device; // Suppress unused variable warning |
740 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__rad2deg" , "self" ); |
741 | const OptionalDeviceGuard device_guard(device_of(self)); |
742 | return at::native::rad2deg_sparse(self); |
743 | } |
744 | } // anonymous namespace |
745 | namespace { |
746 | at::Tensor & wrapper_SparseCUDA_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) { |
747 | c10::optional<Device> common_device = nullopt; |
748 | (void)common_device; // Suppress unused variable warning |
749 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_rad2deg_out" , "out" ); |
750 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_rad2deg_out" , "self" ); |
751 | const OptionalDeviceGuard device_guard(device_of(self)); |
752 | return at::native::rad2deg_sparse_out(self, out); |
753 | } |
754 | } // anonymous namespace |
755 | namespace { |
756 | at::Tensor & wrapper_SparseCUDA__rad2deg_(at::Tensor & self) { |
757 | c10::optional<Device> common_device = nullopt; |
758 | (void)common_device; // Suppress unused variable warning |
759 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__rad2deg_" , "self" ); |
760 | const OptionalDeviceGuard device_guard(device_of(self)); |
761 | return at::native::rad2deg_sparse_(self); |
762 | } |
763 | } // anonymous namespace |
764 | namespace { |
765 | at::Tensor wrapper_SparseCUDA__deg2rad(const at::Tensor & self) { |
766 | c10::optional<Device> common_device = nullopt; |
767 | (void)common_device; // Suppress unused variable warning |
768 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__deg2rad" , "self" ); |
769 | const OptionalDeviceGuard device_guard(device_of(self)); |
770 | return at::native::deg2rad_sparse(self); |
771 | } |
772 | } // anonymous namespace |
773 | namespace { |
774 | at::Tensor & wrapper_SparseCUDA_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) { |
775 | c10::optional<Device> common_device = nullopt; |
776 | (void)common_device; // Suppress unused variable warning |
777 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_deg2rad_out" , "out" ); |
778 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_deg2rad_out" , "self" ); |
779 | const OptionalDeviceGuard device_guard(device_of(self)); |
780 | return at::native::deg2rad_sparse_out(self, out); |
781 | } |
782 | } // anonymous namespace |
783 | namespace { |
784 | at::Tensor & wrapper_SparseCUDA__deg2rad_(at::Tensor & self) { |
785 | c10::optional<Device> common_device = nullopt; |
786 | (void)common_device; // Suppress unused variable warning |
787 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__deg2rad_" , "self" ); |
788 | const OptionalDeviceGuard device_guard(device_of(self)); |
789 | return at::native::deg2rad_sparse_(self); |
790 | } |
791 | } // anonymous namespace |
792 | namespace { |
793 | at::Tensor wrapper_SparseCUDA__neg(const at::Tensor & self) { |
794 | // No device check |
795 | const OptionalDeviceGuard device_guard(device_of(self)); |
796 | return at::native::neg_sparse(self); |
797 | } |
798 | } // anonymous namespace |
799 | namespace { |
800 | at::Tensor & wrapper_SparseCUDA_out_neg_out(const at::Tensor & self, at::Tensor & out) { |
801 | // No device check |
802 | const OptionalDeviceGuard device_guard(device_of(self)); |
803 | return at::native::neg_out_sparse(self, out); |
804 | } |
805 | } // anonymous namespace |
806 | namespace { |
807 | at::Tensor & wrapper_SparseCUDA__neg_(at::Tensor & self) { |
808 | // No device check |
809 | const OptionalDeviceGuard device_guard(device_of(self)); |
810 | return at::native::neg_sparse_(self); |
811 | } |
812 | } // anonymous namespace |
813 | namespace { |
814 | at::Tensor wrapper_SparseCUDA__round(const at::Tensor & self) { |
815 | // No device check |
816 | const OptionalDeviceGuard device_guard(device_of(self)); |
817 | return at::native::round_sparse(self); |
818 | } |
819 | } // anonymous namespace |
820 | namespace { |
821 | at::Tensor & wrapper_SparseCUDA_out_round_out(const at::Tensor & self, at::Tensor & out) { |
822 | // No device check |
823 | const OptionalDeviceGuard device_guard(device_of(self)); |
824 | return at::native::round_sparse_out(self, out); |
825 | } |
826 | } // anonymous namespace |
827 | namespace { |
828 | at::Tensor & wrapper_SparseCUDA__round_(at::Tensor & self) { |
829 | // No device check |
830 | const OptionalDeviceGuard device_guard(device_of(self)); |
831 | return at::native::round_sparse_(self); |
832 | } |
833 | } // anonymous namespace |
834 | namespace { |
835 | at::Tensor wrapper_SparseCUDA__relu(const at::Tensor & self) { |
836 | // No device check |
837 | const OptionalDeviceGuard device_guard(device_of(self)); |
838 | return at::native::relu_sparse(self); |
839 | } |
840 | } // anonymous namespace |
841 | namespace { |
842 | at::Tensor & wrapper_SparseCUDA__relu_(at::Tensor & self) { |
843 | // No device check |
844 | const OptionalDeviceGuard device_guard(device_of(self)); |
845 | return at::native::relu_sparse_(self); |
846 | } |
847 | } // anonymous namespace |
848 | namespace { |
849 | at::Tensor wrapper_SparseCUDA__sin(const at::Tensor & self) { |
850 | // No device check |
851 | const OptionalDeviceGuard device_guard(device_of(self)); |
852 | return at::native::sin_sparse(self); |
853 | } |
854 | } // anonymous namespace |
855 | namespace { |
856 | at::Tensor & wrapper_SparseCUDA_out_sin_out(const at::Tensor & self, at::Tensor & out) { |
857 | // No device check |
858 | const OptionalDeviceGuard device_guard(device_of(self)); |
859 | return at::native::sin_sparse_out(self, out); |
860 | } |
861 | } // anonymous namespace |
862 | namespace { |
863 | at::Tensor & wrapper_SparseCUDA__sin_(at::Tensor & self) { |
864 | // No device check |
865 | const OptionalDeviceGuard device_guard(device_of(self)); |
866 | return at::native::sin_sparse_(self); |
867 | } |
868 | } // anonymous namespace |
869 | namespace { |
870 | at::Tensor wrapper_SparseCUDA__sinh(const at::Tensor & self) { |
871 | // No device check |
872 | const OptionalDeviceGuard device_guard(device_of(self)); |
873 | return at::native::sinh_sparse(self); |
874 | } |
875 | } // anonymous namespace |
876 | namespace { |
877 | at::Tensor & wrapper_SparseCUDA_out_sinh_out(const at::Tensor & self, at::Tensor & out) { |
878 | // No device check |
879 | const OptionalDeviceGuard device_guard(device_of(self)); |
880 | return at::native::sinh_sparse_out(self, out); |
881 | } |
882 | } // anonymous namespace |
883 | namespace { |
884 | at::Tensor & wrapper_SparseCUDA__sinh_(at::Tensor & self) { |
885 | // No device check |
886 | const OptionalDeviceGuard device_guard(device_of(self)); |
887 | return at::native::sinh_sparse_(self); |
888 | } |
889 | } // anonymous namespace |
890 | namespace { |
891 | at::Tensor & wrapper_SparseCUDA_out_sspaddmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
892 | c10::optional<Device> common_device = nullopt; |
893 | (void)common_device; // Suppress unused variable warning |
894 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_sspaddmm_out" , "out" ); |
895 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_sspaddmm_out" , "self" ); |
896 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA_out_sspaddmm_out" , "mat1" ); |
897 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_sspaddmm_out" , "mat2" ); |
898 | const OptionalDeviceGuard device_guard(device_of(self)); |
899 | return at::native::_sspaddmm_out_cuda(self, mat1, mat2, beta, alpha, out); |
900 | } |
901 | } // anonymous namespace |
902 | namespace { |
903 | at::Tensor wrapper_SparseCUDA__sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) { |
904 | // No device check |
905 | const OptionalDeviceGuard device_guard(device_of(self)); |
906 | return at::native::sum_coo(self, dtype); |
907 | } |
908 | } // anonymous namespace |
909 | namespace { |
910 | at::Tensor wrapper_SparseCUDA_dim_IntList_sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
911 | // No device check |
912 | const OptionalDeviceGuard device_guard(device_of(self)); |
913 | return at::native::sum_sparse_coo(self, dim, keepdim, dtype); |
914 | } |
915 | } // anonymous namespace |
916 | namespace { |
917 | at::Tensor wrapper_SparseCUDA__sqrt(const at::Tensor & self) { |
918 | // No device check |
919 | const OptionalDeviceGuard device_guard(device_of(self)); |
920 | return at::native::sqrt_sparse(self); |
921 | } |
922 | } // anonymous namespace |
923 | namespace { |
924 | at::Tensor & wrapper_SparseCUDA_out_sqrt_out(const at::Tensor & self, at::Tensor & out) { |
925 | // No device check |
926 | const OptionalDeviceGuard device_guard(device_of(self)); |
927 | return at::native::sqrt_sparse_out(self, out); |
928 | } |
929 | } // anonymous namespace |
930 | namespace { |
931 | at::Tensor & wrapper_SparseCUDA__sqrt_(at::Tensor & self) { |
932 | // No device check |
933 | const OptionalDeviceGuard device_guard(device_of(self)); |
934 | return at::native::sqrt_sparse_(self); |
935 | } |
936 | } // anonymous namespace |
937 | namespace { |
938 | at::Tensor wrapper_SparseCUDA__tan(const at::Tensor & self) { |
939 | // No device check |
940 | const OptionalDeviceGuard device_guard(device_of(self)); |
941 | return at::native::tan_sparse(self); |
942 | } |
943 | } // anonymous namespace |
944 | namespace { |
945 | at::Tensor & wrapper_SparseCUDA_out_tan_out(const at::Tensor & self, at::Tensor & out) { |
946 | // No device check |
947 | const OptionalDeviceGuard device_guard(device_of(self)); |
948 | return at::native::tan_sparse_out(self, out); |
949 | } |
950 | } // anonymous namespace |
951 | namespace { |
952 | at::Tensor & wrapper_SparseCUDA__tan_(at::Tensor & self) { |
953 | // No device check |
954 | const OptionalDeviceGuard device_guard(device_of(self)); |
955 | return at::native::tan_sparse_(self); |
956 | } |
957 | } // anonymous namespace |
958 | namespace { |
959 | at::Tensor wrapper_SparseCUDA__tanh(const at::Tensor & self) { |
960 | // No device check |
961 | const OptionalDeviceGuard device_guard(device_of(self)); |
962 | return at::native::tanh_sparse(self); |
963 | } |
964 | } // anonymous namespace |
965 | namespace { |
966 | at::Tensor & wrapper_SparseCUDA_out_tanh_out(const at::Tensor & self, at::Tensor & out) { |
967 | // No device check |
968 | const OptionalDeviceGuard device_guard(device_of(self)); |
969 | return at::native::tanh_sparse_out(self, out); |
970 | } |
971 | } // anonymous namespace |
972 | namespace { |
973 | at::Tensor & wrapper_SparseCUDA__tanh_(at::Tensor & self) { |
974 | // No device check |
975 | const OptionalDeviceGuard device_guard(device_of(self)); |
976 | return at::native::tanh_sparse_(self); |
977 | } |
978 | } // anonymous namespace |
979 | namespace { |
980 | at::Tensor wrapper_SparseCUDA__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { |
981 | c10::optional<Device> common_device = nullopt; |
982 | (void)common_device; // Suppress unused variable warning |
983 | c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCUDA__threshold_backward" , "grad_output" ); |
984 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__threshold_backward" , "self" ); |
985 | const OptionalDeviceGuard device_guard(device_of(self)); |
986 | return at::native::threshold_backward_sparse(grad_output, self, threshold); |
987 | } |
988 | } // anonymous namespace |
989 | namespace { |
990 | at::Tensor & wrapper_SparseCUDA_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) { |
991 | c10::optional<Device> common_device = nullopt; |
992 | (void)common_device; // Suppress unused variable warning |
993 | c10::impl::check_and_update_common_device(common_device, grad_input, "wrapper_SparseCUDA_grad_input_threshold_backward_out" , "grad_input" ); |
994 | c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCUDA_grad_input_threshold_backward_out" , "grad_output" ); |
995 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_grad_input_threshold_backward_out" , "self" ); |
996 | const OptionalDeviceGuard device_guard(device_of(self)); |
997 | return at::native::threshold_backward_sparse_out(grad_output, self, threshold, grad_input); |
998 | } |
999 | } // anonymous namespace |
1000 | namespace { |
1001 | at::Tensor wrapper_SparseCUDA__trunc(const at::Tensor & self) { |
1002 | // No device check |
1003 | const OptionalDeviceGuard device_guard(device_of(self)); |
1004 | return at::native::trunc_sparse(self); |
1005 | } |
1006 | } // anonymous namespace |
1007 | namespace { |
1008 | at::Tensor & wrapper_SparseCUDA_out_trunc_out(const at::Tensor & self, at::Tensor & out) { |
1009 | // No device check |
1010 | const OptionalDeviceGuard device_guard(device_of(self)); |
1011 | return at::native::trunc_sparse_out(self, out); |
1012 | } |
1013 | } // anonymous namespace |
1014 | namespace { |
1015 | at::Tensor & wrapper_SparseCUDA__trunc_(at::Tensor & self) { |
1016 | // No device check |
1017 | const OptionalDeviceGuard device_guard(device_of(self)); |
1018 | return at::native::trunc_sparse_(self); |
1019 | } |
1020 | } // anonymous namespace |
1021 | namespace { |
1022 | at::Tensor wrapper_SparseCUDA__unsqueeze(const at::Tensor & self, int64_t dim) { |
1023 | // No device check |
1024 | // DeviceGuard omitted |
1025 | return at::native::unsqueeze_sparse(self, dim); |
1026 | } |
1027 | } // anonymous namespace |
1028 | namespace { |
1029 | at::Tensor & wrapper_SparseCUDA_out_zeros_out(c10::SymIntArrayRef size, at::Tensor & out) { |
1030 | c10::optional<Device> common_device = nullopt; |
1031 | (void)common_device; // Suppress unused variable warning |
1032 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_zeros_out" , "out" ); |
1033 | const OptionalDeviceGuard device_guard(device_of(out)); |
1034 | return at::native::zeros_sparse_out(C10_AS_INTARRAYREF_SLOW(size), out); |
1035 | } |
1036 | } // anonymous namespace |
1037 | namespace { |
1038 | at::Tensor wrapper_SparseCUDA__native_norm(const at::Tensor & self, const at::Scalar & p) { |
1039 | c10::optional<Device> common_device = nullopt; |
1040 | (void)common_device; // Suppress unused variable warning |
1041 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__native_norm" , "self" ); |
1042 | const OptionalDeviceGuard device_guard(device_of(self)); |
1043 | return at::native::norm_sparse(self, p); |
1044 | } |
1045 | } // anonymous namespace |
1046 | namespace { |
1047 | at::Tensor wrapper_SparseCUDA_ScalarOpt_dim_dtype_native_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
1048 | c10::optional<Device> common_device = nullopt; |
1049 | (void)common_device; // Suppress unused variable warning |
1050 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_ScalarOpt_dim_dtype_native_norm" , "self" ); |
1051 | const OptionalDeviceGuard device_guard(device_of(self)); |
1052 | return at::native::norm_sparse(self, p, dim, keepdim, dtype); |
1053 | } |
1054 | } // anonymous namespace |
1055 | namespace { |
1056 | at::Tensor wrapper_SparseCUDA___sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) { |
1057 | c10::optional<Device> common_device = nullopt; |
1058 | (void)common_device; // Suppress unused variable warning |
1059 | c10::impl::check_and_update_common_device(common_device, grad, "wrapper_SparseCUDA___sparse_sum_backward" , "grad" ); |
1060 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_sum_backward" , "self" ); |
1061 | const OptionalDeviceGuard device_guard(device_of(self)); |
1062 | return at::native::_sparse_sum_backward_cuda(grad, self, dim); |
1063 | } |
1064 | } // anonymous namespace |
1065 | namespace { |
1066 | at::Tensor wrapper_SparseCUDA___sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) { |
1067 | c10::optional<Device> common_device = nullopt; |
1068 | (void)common_device; // Suppress unused variable warning |
1069 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_softmax" , "self" ); |
1070 | const OptionalDeviceGuard device_guard(device_of(self)); |
1071 | return at::native::softmax_sparse_cuda(self, dim, half_to_float); |
1072 | } |
1073 | } // anonymous namespace |
1074 | namespace { |
1075 | at::Tensor wrapper_SparseCUDA___sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { |
1076 | c10::optional<Device> common_device = nullopt; |
1077 | (void)common_device; // Suppress unused variable warning |
1078 | c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCUDA___sparse_softmax_backward_data" , "grad_output" ); |
1079 | c10::impl::check_and_update_common_device(common_device, output, "wrapper_SparseCUDA___sparse_softmax_backward_data" , "output" ); |
1080 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_softmax_backward_data" , "self" ); |
1081 | const OptionalDeviceGuard device_guard(device_of(self)); |
1082 | return at::native::softmax_backward_sparse_cuda(grad_output, output, dim, self); |
1083 | } |
1084 | } // anonymous namespace |
1085 | namespace { |
1086 | at::Tensor wrapper_SparseCUDA___sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) { |
1087 | c10::optional<Device> common_device = nullopt; |
1088 | (void)common_device; // Suppress unused variable warning |
1089 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_log_softmax" , "self" ); |
1090 | const OptionalDeviceGuard device_guard(device_of(self)); |
1091 | return at::native::log_softmax_sparse_cuda(self, dim, half_to_float); |
1092 | } |
1093 | } // anonymous namespace |
1094 | namespace { |
1095 | at::Tensor wrapper_SparseCUDA___sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { |
1096 | c10::optional<Device> common_device = nullopt; |
1097 | (void)common_device; // Suppress unused variable warning |
1098 | c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCUDA___sparse_log_softmax_backward_data" , "grad_output" ); |
1099 | c10::impl::check_and_update_common_device(common_device, output, "wrapper_SparseCUDA___sparse_log_softmax_backward_data" , "output" ); |
1100 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___sparse_log_softmax_backward_data" , "self" ); |
1101 | const OptionalDeviceGuard device_guard(device_of(self)); |
1102 | return at::native::log_softmax_backward_sparse_cuda(grad_output, output, dim, self); |
1103 | } |
1104 | } // anonymous namespace |
1105 | namespace { |
1106 | at::Tensor wrapper_SparseCUDA_ScalarOpt_dim_dtype_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) { |
1107 | // No device check |
1108 | const OptionalDeviceGuard device_guard(device_of(self)); |
1109 | return at::native::sparse_dtype_norm(self, p, dim, keepdim, dtype); |
1110 | } |
1111 | } // anonymous namespace |
1112 | namespace { |
1113 | at::Tensor wrapper_SparseCUDA_ScalarOpt_dim_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) { |
1114 | // No device check |
1115 | const OptionalDeviceGuard device_guard(device_of(self)); |
1116 | return at::native::sparse_norm(self, p, dim, keepdim); |
1117 | } |
1118 | } // anonymous namespace |
1119 | namespace { |
1120 | at::Tensor wrapper_SparseCUDA__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) { |
1121 | c10::optional<Device> common_device = nullopt; |
1122 | (void)common_device; // Suppress unused variable warning |
1123 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__clone" , "self" ); |
1124 | const OptionalDeviceGuard device_guard(device_of(self)); |
1125 | return at::native::clone_sparse(self, memory_format); |
1126 | } |
1127 | } // anonymous namespace |
1128 | namespace { |
1129 | const at::Tensor & wrapper_SparseCUDA__resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) { |
1130 | c10::optional<Device> common_device = nullopt; |
1131 | (void)common_device; // Suppress unused variable warning |
1132 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__resize_as_sparse_" , "self" ); |
1133 | c10::impl::check_and_update_common_device(common_device, the_template, "wrapper_SparseCUDA__resize_as_sparse_" , "the_template" ); |
1134 | const OptionalDeviceGuard device_guard(device_of(self)); |
1135 | return at::native::resize_as_sparse_(self, the_template); |
1136 | } |
1137 | } // anonymous namespace |
1138 | namespace { |
1139 | at::Tensor & wrapper_SparseCUDA__zero_(at::Tensor & self) { |
1140 | // No device check |
1141 | const OptionalDeviceGuard device_guard(device_of(self)); |
1142 | return at::native::zero_sparse_(self); |
1143 | } |
1144 | } // anonymous namespace |
1145 | namespace { |
1146 | at::Tensor wrapper_SparseCUDA_Tensor_sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1147 | // No device check |
1148 | const OptionalDeviceGuard device_guard(device_of(self)); |
1149 | return at::native::sub_sparse(self, other, alpha); |
1150 | } |
1151 | } // anonymous namespace |
1152 | namespace { |
1153 | at::Tensor & wrapper_SparseCUDA_out_sub_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
1154 | // No device check |
1155 | const OptionalDeviceGuard device_guard(device_of(self)); |
1156 | return at::native::sub_out_sparse(self, other, alpha, out); |
1157 | } |
1158 | } // anonymous namespace |
1159 | namespace { |
1160 | at::Tensor & wrapper_SparseCUDA_Tensor_sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1161 | // No device check |
1162 | const OptionalDeviceGuard device_guard(device_of(self)); |
1163 | return at::native::sub_sparse_(self, other, alpha); |
1164 | } |
1165 | } // anonymous namespace |
1166 | namespace { |
1167 | at::Tensor wrapper_SparseCUDA__addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
1168 | c10::optional<Device> common_device = nullopt; |
1169 | (void)common_device; // Suppress unused variable warning |
1170 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__addmm" , "self" ); |
1171 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA__addmm" , "mat1" ); |
1172 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__addmm" , "mat2" ); |
1173 | const OptionalDeviceGuard device_guard(device_of(self)); |
1174 | return at::native::addmm_sparse_dense_cuda(self, mat1, mat2, beta, alpha); |
1175 | } |
1176 | } // anonymous namespace |
1177 | namespace { |
1178 | at::Tensor & wrapper_SparseCUDA_out_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
1179 | c10::optional<Device> common_device = nullopt; |
1180 | (void)common_device; // Suppress unused variable warning |
1181 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_addmm_out" , "out" ); |
1182 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_addmm_out" , "self" ); |
1183 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA_out_addmm_out" , "mat1" ); |
1184 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_addmm_out" , "mat2" ); |
1185 | const OptionalDeviceGuard device_guard(device_of(self)); |
1186 | return at::native::addmm_out_sparse_dense_cuda(self, mat1, mat2, beta, alpha, out); |
1187 | } |
1188 | } // anonymous namespace |
1189 | namespace { |
1190 | at::Tensor & wrapper_SparseCUDA__addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
1191 | c10::optional<Device> common_device = nullopt; |
1192 | (void)common_device; // Suppress unused variable warning |
1193 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__addmm_" , "self" ); |
1194 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA__addmm_" , "mat1" ); |
1195 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__addmm_" , "mat2" ); |
1196 | const OptionalDeviceGuard device_guard(device_of(self)); |
1197 | return at::native::s_addmm_sparse_dense_cuda_(self, mat1, mat2, beta, alpha); |
1198 | } |
1199 | } // anonymous namespace |
1200 | namespace { |
1201 | at::Tensor wrapper_SparseCUDA___sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
1202 | c10::optional<Device> common_device = nullopt; |
1203 | (void)common_device; // Suppress unused variable warning |
1204 | globalContext().lazyInitCUDA(); |
1205 | const DeviceGuard device_guard(device_or_default(device)); |
1206 | return at::native::new_with_dims_sparse(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory); |
1207 | } |
1208 | } // anonymous namespace |
1209 | namespace { |
1210 | at::Tensor wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
1211 | c10::optional<Device> common_device = nullopt; |
1212 | (void)common_device; // Suppress unused variable warning |
1213 | c10::impl::check_and_update_common_device(common_device, indices, "wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors" , "indices" ); |
1214 | c10::impl::check_and_update_common_device(common_device, values, "wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors" , "values" ); |
1215 | globalContext().lazyInitCUDA(); |
1216 | const DeviceGuard device_guard(device_or_default(device)); |
1217 | return at::native::new_with_dims_and_tensor_sparse_symint(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory); |
1218 | } |
1219 | } // anonymous namespace |
1220 | namespace { |
1221 | const at::Tensor & wrapper_SparseCUDA__sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { |
1222 | c10::optional<Device> common_device = nullopt; |
1223 | (void)common_device; // Suppress unused variable warning |
1224 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sparse_resize_" , "self" ); |
1225 | const OptionalDeviceGuard device_guard(device_of(self)); |
1226 | return at::native::sparse_resize_(self, size, sparse_dim, dense_dim); |
1227 | } |
1228 | } // anonymous namespace |
1229 | namespace { |
1230 | const at::Tensor & wrapper_SparseCUDA__sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { |
1231 | c10::optional<Device> common_device = nullopt; |
1232 | (void)common_device; // Suppress unused variable warning |
1233 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sparse_resize_and_clear_" , "self" ); |
1234 | const OptionalDeviceGuard device_guard(device_of(self)); |
1235 | return at::native::sparse_resize_and_clear_(self, size, sparse_dim, dense_dim); |
1236 | } |
1237 | } // anonymous namespace |
1238 | namespace { |
1239 | at::Tensor wrapper_SparseCUDA__sparse_mask(const at::Tensor & self, const at::Tensor & mask) { |
1240 | c10::optional<Device> common_device = nullopt; |
1241 | (void)common_device; // Suppress unused variable warning |
1242 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__sparse_mask" , "self" ); |
1243 | c10::impl::check_and_update_common_device(common_device, mask, "wrapper_SparseCUDA__sparse_mask" , "mask" ); |
1244 | const OptionalDeviceGuard device_guard(device_of(self)); |
1245 | return at::native::sparse_mask(self, mask); |
1246 | } |
1247 | } // anonymous namespace |
1248 | namespace { |
1249 | at::Tensor wrapper_SparseCUDA___to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) { |
1250 | c10::optional<Device> common_device = nullopt; |
1251 | (void)common_device; // Suppress unused variable warning |
1252 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___to_dense" , "self" ); |
1253 | const OptionalDeviceGuard device_guard(device_of(self)); |
1254 | return at::native::sparse_to_dense(self, dtype); |
1255 | } |
1256 | } // anonymous namespace |
1257 | namespace { |
1258 | int64_t wrapper_SparseCUDA__sparse_dim(const at::Tensor & self) { |
1259 | // No device check |
1260 | // DeviceGuard omitted |
1261 | return at::native::sparse_dim_sparse(self); |
1262 | } |
1263 | } // anonymous namespace |
1264 | namespace { |
1265 | int64_t wrapper_SparseCUDA___dimI(const at::Tensor & self) { |
1266 | // No device check |
1267 | // DeviceGuard omitted |
1268 | return at::native::sparse_dim_sparse(self); |
1269 | } |
1270 | } // anonymous namespace |
1271 | namespace { |
1272 | int64_t wrapper_SparseCUDA__dense_dim(const at::Tensor & self) { |
1273 | // No device check |
1274 | // DeviceGuard omitted |
1275 | return at::native::dense_dim_sparse(self); |
1276 | } |
1277 | } // anonymous namespace |
1278 | namespace { |
1279 | int64_t wrapper_SparseCUDA___dimV(const at::Tensor & self) { |
1280 | // No device check |
1281 | // DeviceGuard omitted |
1282 | return at::native::dense_dim_sparse(self); |
1283 | } |
1284 | } // anonymous namespace |
1285 | namespace { |
1286 | int64_t wrapper_SparseCUDA___nnz(const at::Tensor & self) { |
1287 | // No device check |
1288 | // DeviceGuard omitted |
1289 | return at::native::_nnz_sparse(self); |
1290 | } |
1291 | } // anonymous namespace |
1292 | namespace { |
1293 | at::Tensor wrapper_SparseCUDA___coalesce(const at::Tensor & self) { |
1294 | c10::optional<Device> common_device = nullopt; |
1295 | (void)common_device; // Suppress unused variable warning |
1296 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA___coalesce" , "self" ); |
1297 | const OptionalDeviceGuard device_guard(device_of(self)); |
1298 | return at::native::_coalesce_sparse_cuda(self); |
1299 | } |
1300 | } // anonymous namespace |
1301 | namespace { |
1302 | bool wrapper_SparseCUDA__is_coalesced(const at::Tensor & self) { |
1303 | // No device check |
1304 | // DeviceGuard omitted |
1305 | return at::native::is_coalesced_sparse(self); |
1306 | } |
1307 | } // anonymous namespace |
1308 | namespace { |
1309 | at::Tensor wrapper_SparseCUDA___indices(const at::Tensor & self) { |
1310 | // No device check |
1311 | // DeviceGuard omitted |
1312 | return at::native::_indices_sparse(self); |
1313 | } |
1314 | } // anonymous namespace |
1315 | namespace { |
1316 | at::Tensor wrapper_SparseCUDA___values(const at::Tensor & self) { |
1317 | // No device check |
1318 | // DeviceGuard omitted |
1319 | return at::native::_values_sparse(self); |
1320 | } |
1321 | } // anonymous namespace |
1322 | namespace { |
1323 | at::Tensor & wrapper_SparseCUDA___coalesced_(at::Tensor & self, bool coalesced) { |
1324 | // No device check |
1325 | // DeviceGuard omitted |
1326 | return at::native::_coalesced_sparse_(self, coalesced); |
1327 | } |
1328 | } // anonymous namespace |
1329 | namespace { |
1330 | at::Tensor wrapper_SparseCUDA__indices(const at::Tensor & self) { |
1331 | // No device check |
1332 | // DeviceGuard omitted |
1333 | return at::native::indices_sparse(self); |
1334 | } |
1335 | } // anonymous namespace |
1336 | namespace { |
1337 | at::Tensor wrapper_SparseCUDA__values(const at::Tensor & self) { |
1338 | // No device check |
1339 | // DeviceGuard omitted |
1340 | return at::native::values_sparse(self); |
1341 | } |
1342 | } // anonymous namespace |
1343 | namespace { |
1344 | at::Tensor wrapper_SparseCUDA__hspmm(const at::Tensor & mat1, const at::Tensor & mat2) { |
1345 | c10::optional<Device> common_device = nullopt; |
1346 | (void)common_device; // Suppress unused variable warning |
1347 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA__hspmm" , "mat1" ); |
1348 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA__hspmm" , "mat2" ); |
1349 | const OptionalDeviceGuard device_guard(device_of(mat1)); |
1350 | return at::native::hspmm_sparse_cuda(mat1, mat2); |
1351 | } |
1352 | } // anonymous namespace |
1353 | namespace { |
1354 | at::Tensor & wrapper_SparseCUDA_out_hspmm_out(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) { |
1355 | c10::optional<Device> common_device = nullopt; |
1356 | (void)common_device; // Suppress unused variable warning |
1357 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_hspmm_out" , "out" ); |
1358 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCUDA_out_hspmm_out" , "mat1" ); |
1359 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCUDA_out_hspmm_out" , "mat2" ); |
1360 | const OptionalDeviceGuard device_guard(device_of(out)); |
1361 | return at::native::hspmm_out_sparse_cuda(mat1, mat2, out); |
1362 | } |
1363 | } // anonymous namespace |
1364 | namespace { |
1365 | at::Tensor & wrapper_SparseCUDA__copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) { |
1366 | // No device check |
1367 | const OptionalDeviceGuard device_guard(device_of(self)); |
1368 | return at::native::copy_sparse_(self, src, non_blocking); |
1369 | } |
1370 | } // anonymous namespace |
1371 | namespace { |
1372 | at::Tensor wrapper_SparseCUDA_sparse_dim_to_sparse(const at::Tensor & self, int64_t sparse_dim) { |
1373 | c10::optional<Device> common_device = nullopt; |
1374 | (void)common_device; // Suppress unused variable warning |
1375 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_sparse_dim_to_sparse" , "self" ); |
1376 | const OptionalDeviceGuard device_guard(device_of(self)); |
1377 | return at::native::sparse_coo_to_sparse(self, sparse_dim); |
1378 | } |
1379 | } // anonymous namespace |
1380 | namespace { |
1381 | at::Tensor wrapper_SparseCUDA__to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
1382 | c10::optional<Device> common_device = nullopt; |
1383 | (void)common_device; // Suppress unused variable warning |
1384 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse" , "self" ); |
1385 | const OptionalDeviceGuard device_guard(device_of(self)); |
1386 | return at::native::sparse_coo_to_sparse(self, layout, blocksize, dense_dim); |
1387 | } |
1388 | } // anonymous namespace |
1389 | namespace { |
1390 | at::Tensor wrapper_SparseCUDA__to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) { |
1391 | c10::optional<Device> common_device = nullopt; |
1392 | (void)common_device; // Suppress unused variable warning |
1393 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse_csr" , "self" ); |
1394 | const OptionalDeviceGuard device_guard(device_of(self)); |
1395 | return at::native::coo_to_sparse_csr(self, dense_dim); |
1396 | } |
1397 | } // anonymous namespace |
1398 | namespace { |
1399 | at::Tensor wrapper_SparseCUDA__to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) { |
1400 | c10::optional<Device> common_device = nullopt; |
1401 | (void)common_device; // Suppress unused variable warning |
1402 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse_csc" , "self" ); |
1403 | const OptionalDeviceGuard device_guard(device_of(self)); |
1404 | return at::native::coo_to_sparse_csc(self, dense_dim); |
1405 | } |
1406 | } // anonymous namespace |
1407 | namespace { |
1408 | at::Tensor wrapper_SparseCUDA__to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
1409 | c10::optional<Device> common_device = nullopt; |
1410 | (void)common_device; // Suppress unused variable warning |
1411 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse_bsr" , "self" ); |
1412 | const OptionalDeviceGuard device_guard(device_of(self)); |
1413 | return at::native::coo_to_sparse_bsr(self, blocksize, dense_dim); |
1414 | } |
1415 | } // anonymous namespace |
1416 | namespace { |
1417 | at::Tensor wrapper_SparseCUDA__to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
1418 | c10::optional<Device> common_device = nullopt; |
1419 | (void)common_device; // Suppress unused variable warning |
1420 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__to_sparse_bsc" , "self" ); |
1421 | const OptionalDeviceGuard device_guard(device_of(self)); |
1422 | return at::native::coo_to_sparse_bsc(self, blocksize, dense_dim); |
1423 | } |
1424 | } // anonymous namespace |
1425 | namespace { |
1426 | at::Tensor wrapper_SparseCUDA__index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) { |
1427 | c10::optional<Device> common_device = nullopt; |
1428 | (void)common_device; // Suppress unused variable warning |
1429 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__index_select" , "self" ); |
1430 | c10::impl::check_and_update_common_device(common_device, index, "wrapper_SparseCUDA__index_select" , "index" ); |
1431 | const OptionalDeviceGuard device_guard(device_of(self)); |
1432 | return at::native::index_select_sparse_cuda(self, dim, index); |
1433 | } |
1434 | } // anonymous namespace |
1435 | namespace { |
1436 | at::Tensor wrapper_SparseCUDA__erfinv(const at::Tensor & self) { |
1437 | // No device check |
1438 | const OptionalDeviceGuard device_guard(device_of(self)); |
1439 | return at::native::erfinv_sparse(self); |
1440 | } |
1441 | } // anonymous namespace |
1442 | namespace { |
1443 | at::Tensor & wrapper_SparseCUDA_out_erfinv_out(const at::Tensor & self, at::Tensor & out) { |
1444 | // No device check |
1445 | const OptionalDeviceGuard device_guard(device_of(self)); |
1446 | return at::native::erfinv_sparse_out(self, out); |
1447 | } |
1448 | } // anonymous namespace |
1449 | namespace { |
1450 | at::Tensor & wrapper_SparseCUDA__erfinv_(at::Tensor & self) { |
1451 | // No device check |
1452 | const OptionalDeviceGuard device_guard(device_of(self)); |
1453 | return at::native::erfinv_sparse_(self); |
1454 | } |
1455 | } // anonymous namespace |
1456 | namespace { |
1457 | at::Tensor wrapper_SparseCUDA__sign(const at::Tensor & self) { |
1458 | // No device check |
1459 | const OptionalDeviceGuard device_guard(device_of(self)); |
1460 | return at::native::sign_sparse(self); |
1461 | } |
1462 | } // anonymous namespace |
1463 | namespace { |
1464 | at::Tensor & wrapper_SparseCUDA_out_sign_out(const at::Tensor & self, at::Tensor & out) { |
1465 | // No device check |
1466 | const OptionalDeviceGuard device_guard(device_of(self)); |
1467 | return at::native::sign_sparse_out(self, out); |
1468 | } |
1469 | } // anonymous namespace |
1470 | namespace { |
1471 | at::Tensor & wrapper_SparseCUDA__sign_(at::Tensor & self) { |
1472 | // No device check |
1473 | const OptionalDeviceGuard device_guard(device_of(self)); |
1474 | return at::native::sign_sparse_(self); |
1475 | } |
1476 | } // anonymous namespace |
1477 | namespace { |
1478 | at::Tensor wrapper_SparseCUDA__signbit(const at::Tensor & self) { |
1479 | c10::optional<Device> common_device = nullopt; |
1480 | (void)common_device; // Suppress unused variable warning |
1481 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__signbit" , "self" ); |
1482 | const OptionalDeviceGuard device_guard(device_of(self)); |
1483 | return at::native::signbit_sparse(self); |
1484 | } |
1485 | } // anonymous namespace |
1486 | namespace { |
1487 | at::Tensor & wrapper_SparseCUDA_out_signbit_out(const at::Tensor & self, at::Tensor & out) { |
1488 | c10::optional<Device> common_device = nullopt; |
1489 | (void)common_device; // Suppress unused variable warning |
1490 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_signbit_out" , "out" ); |
1491 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_signbit_out" , "self" ); |
1492 | const OptionalDeviceGuard device_guard(device_of(self)); |
1493 | return at::native::signbit_sparse_out(self, out); |
1494 | } |
1495 | } // anonymous namespace |
1496 | namespace { |
1497 | at::Tensor wrapper_SparseCUDA__any(const at::Tensor & self) { |
1498 | // No device check |
1499 | const OptionalDeviceGuard device_guard(device_of(self)); |
1500 | return at::native::any_sparse(self); |
1501 | } |
1502 | } // anonymous namespace |
1503 | namespace { |
1504 | at::Tensor wrapper_SparseCUDA_Tensor_Scalar_pow(const at::Tensor & self, const at::Scalar & exponent) { |
1505 | // No device check |
1506 | const OptionalDeviceGuard device_guard(device_of(self)); |
1507 | return at::native::pow_sparse_scalar(self, exponent); |
1508 | } |
1509 | } // anonymous namespace |
1510 | namespace { |
1511 | at::Tensor & wrapper_SparseCUDA_Tensor_Scalar_out_pow_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { |
1512 | // No device check |
1513 | const OptionalDeviceGuard device_guard(device_of(self)); |
1514 | return at::native::pow_out_sparse_scalar(self, exponent, out); |
1515 | } |
1516 | } // anonymous namespace |
1517 | namespace { |
1518 | at::Tensor wrapper_SparseCUDA__isinf(const at::Tensor & self) { |
1519 | // No device check |
1520 | // DeviceGuard omitted |
1521 | return at::native::isinf_sparse(self); |
1522 | } |
1523 | } // anonymous namespace |
1524 | namespace { |
1525 | at::Tensor wrapper_SparseCUDA__isposinf(const at::Tensor & self) { |
1526 | c10::optional<Device> common_device = nullopt; |
1527 | (void)common_device; // Suppress unused variable warning |
1528 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__isposinf" , "self" ); |
1529 | const OptionalDeviceGuard device_guard(device_of(self)); |
1530 | return at::native::isposinf_sparse(self); |
1531 | } |
1532 | } // anonymous namespace |
1533 | namespace { |
1534 | at::Tensor & wrapper_SparseCUDA_out_isposinf_out(const at::Tensor & self, at::Tensor & out) { |
1535 | c10::optional<Device> common_device = nullopt; |
1536 | (void)common_device; // Suppress unused variable warning |
1537 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_isposinf_out" , "out" ); |
1538 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_isposinf_out" , "self" ); |
1539 | const OptionalDeviceGuard device_guard(device_of(self)); |
1540 | return at::native::isposinf_sparse_out(self, out); |
1541 | } |
1542 | } // anonymous namespace |
1543 | namespace { |
1544 | at::Tensor wrapper_SparseCUDA__isneginf(const at::Tensor & self) { |
1545 | c10::optional<Device> common_device = nullopt; |
1546 | (void)common_device; // Suppress unused variable warning |
1547 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA__isneginf" , "self" ); |
1548 | const OptionalDeviceGuard device_guard(device_of(self)); |
1549 | return at::native::isneginf_sparse(self); |
1550 | } |
1551 | } // anonymous namespace |
1552 | namespace { |
1553 | at::Tensor & wrapper_SparseCUDA_out_isneginf_out(const at::Tensor & self, at::Tensor & out) { |
1554 | c10::optional<Device> common_device = nullopt; |
1555 | (void)common_device; // Suppress unused variable warning |
1556 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCUDA_out_isneginf_out" , "out" ); |
1557 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCUDA_out_isneginf_out" , "self" ); |
1558 | const OptionalDeviceGuard device_guard(device_of(self)); |
1559 | return at::native::isneginf_sparse_out(self, out); |
1560 | } |
1561 | } // anonymous namespace |
1562 | TORCH_LIBRARY_IMPL(aten, SparseCUDA, m) { |
1563 | m.impl("abs" , |
1564 | TORCH_FN(wrapper_SparseCUDA__abs)); |
1565 | m.impl("abs.out" , |
1566 | TORCH_FN(wrapper_SparseCUDA_out_abs_out)); |
1567 | m.impl("abs_" , |
1568 | TORCH_FN(wrapper_SparseCUDA__abs_)); |
1569 | m.impl("sgn" , |
1570 | TORCH_FN(wrapper_SparseCUDA__sgn)); |
1571 | m.impl("sgn.out" , |
1572 | TORCH_FN(wrapper_SparseCUDA_out_sgn_out)); |
1573 | m.impl("sgn_" , |
1574 | TORCH_FN(wrapper_SparseCUDA__sgn_)); |
1575 | m.impl("conj_physical.out" , |
1576 | TORCH_FN(wrapper_SparseCUDA_out_conj_physical_out)); |
1577 | m.impl("add.Tensor" , |
1578 | TORCH_FN(wrapper_SparseCUDA_Tensor_add)); |
1579 | m.impl("add.out" , |
1580 | TORCH_FN(wrapper_SparseCUDA_out_add_out)); |
1581 | m.impl("add_.Tensor" , |
1582 | TORCH_FN(wrapper_SparseCUDA_Tensor_add_)); |
1583 | m.impl("asinh" , |
1584 | TORCH_FN(wrapper_SparseCUDA__asinh)); |
1585 | m.impl("asinh.out" , |
1586 | TORCH_FN(wrapper_SparseCUDA_out_asinh_out)); |
1587 | m.impl("asinh_" , |
1588 | TORCH_FN(wrapper_SparseCUDA__asinh_)); |
1589 | m.impl("atanh" , |
1590 | TORCH_FN(wrapper_SparseCUDA__atanh)); |
1591 | m.impl("atanh.out" , |
1592 | TORCH_FN(wrapper_SparseCUDA_out_atanh_out)); |
1593 | m.impl("atanh_" , |
1594 | TORCH_FN(wrapper_SparseCUDA__atanh_)); |
1595 | m.impl("asin" , |
1596 | TORCH_FN(wrapper_SparseCUDA__asin)); |
1597 | m.impl("asin.out" , |
1598 | TORCH_FN(wrapper_SparseCUDA_out_asin_out)); |
1599 | m.impl("asin_" , |
1600 | TORCH_FN(wrapper_SparseCUDA__asin_)); |
1601 | m.impl("atan" , |
1602 | TORCH_FN(wrapper_SparseCUDA__atan)); |
1603 | m.impl("atan.out" , |
1604 | TORCH_FN(wrapper_SparseCUDA_out_atan_out)); |
1605 | m.impl("atan_" , |
1606 | TORCH_FN(wrapper_SparseCUDA__atan_)); |
1607 | m.impl("bmm" , |
1608 | TORCH_FN(wrapper_SparseCUDA__bmm)); |
1609 | m.impl("bmm.out" , |
1610 | TORCH_FN(wrapper_SparseCUDA_out_bmm_out)); |
1611 | m.impl("_sparse_broadcast_to" , |
1612 | TORCH_FN(wrapper_SparseCUDA___sparse_broadcast_to)); |
1613 | m.impl("cat" , |
1614 | TORCH_FN(wrapper_SparseCUDA__cat)); |
1615 | m.impl("ceil" , |
1616 | TORCH_FN(wrapper_SparseCUDA__ceil)); |
1617 | m.impl("ceil.out" , |
1618 | TORCH_FN(wrapper_SparseCUDA_out_ceil_out)); |
1619 | m.impl("ceil_" , |
1620 | TORCH_FN(wrapper_SparseCUDA__ceil_)); |
1621 | m.impl("copy_" , |
1622 | TORCH_FN(wrapper_SparseCUDA__copy_)); |
1623 | m.impl("div.Tensor" , |
1624 | TORCH_FN(wrapper_SparseCUDA_Tensor_div)); |
1625 | m.impl("div.out" , |
1626 | TORCH_FN(wrapper_SparseCUDA_out_div_out)); |
1627 | m.impl("div_.Tensor" , |
1628 | TORCH_FN(wrapper_SparseCUDA_Tensor_div_)); |
1629 | m.impl("div.Tensor_mode" , |
1630 | TORCH_FN(wrapper_SparseCUDA_Tensor_mode_div)); |
1631 | m.impl("div.out_mode" , |
1632 | TORCH_FN(wrapper_SparseCUDA_out_mode_div_out)); |
1633 | m.impl("div_.Tensor_mode" , |
1634 | TORCH_FN(wrapper_SparseCUDA_Tensor_mode_div_)); |
1635 | m.impl("empty.memory_format" , |
1636 | TORCH_FN(wrapper_SparseCUDA_memory_format_empty)); |
1637 | m.impl("empty_like" , |
1638 | TORCH_FN(wrapper_SparseCUDA__empty_like)); |
1639 | m.impl("erf" , |
1640 | TORCH_FN(wrapper_SparseCUDA__erf)); |
1641 | m.impl("erf.out" , |
1642 | TORCH_FN(wrapper_SparseCUDA_out_erf_out)); |
1643 | m.impl("erf_" , |
1644 | TORCH_FN(wrapper_SparseCUDA__erf_)); |
1645 | m.impl("expm1" , |
1646 | TORCH_FN(wrapper_SparseCUDA__expm1)); |
1647 | m.impl("expm1.out" , |
1648 | TORCH_FN(wrapper_SparseCUDA_out_expm1_out)); |
1649 | m.impl("expm1_" , |
1650 | TORCH_FN(wrapper_SparseCUDA__expm1_)); |
1651 | m.impl("floor" , |
1652 | TORCH_FN(wrapper_SparseCUDA__floor)); |
1653 | m.impl("floor.out" , |
1654 | TORCH_FN(wrapper_SparseCUDA_out_floor_out)); |
1655 | m.impl("floor_" , |
1656 | TORCH_FN(wrapper_SparseCUDA__floor_)); |
1657 | m.impl("floor_divide" , |
1658 | TORCH_FN(wrapper_SparseCUDA__floor_divide)); |
1659 | m.impl("floor_divide.out" , |
1660 | TORCH_FN(wrapper_SparseCUDA_out_floor_divide_out)); |
1661 | m.impl("floor_divide_.Tensor" , |
1662 | TORCH_FN(wrapper_SparseCUDA_Tensor_floor_divide_)); |
1663 | m.impl("frac" , |
1664 | TORCH_FN(wrapper_SparseCUDA__frac)); |
1665 | m.impl("frac.out" , |
1666 | TORCH_FN(wrapper_SparseCUDA_out_frac_out)); |
1667 | m.impl("frac_" , |
1668 | TORCH_FN(wrapper_SparseCUDA__frac_)); |
1669 | m.impl("isnan" , |
1670 | TORCH_FN(wrapper_SparseCUDA__isnan)); |
1671 | m.impl("nan_to_num" , |
1672 | TORCH_FN(wrapper_SparseCUDA__nan_to_num)); |
1673 | m.impl("nan_to_num.out" , |
1674 | TORCH_FN(wrapper_SparseCUDA_out_nan_to_num_out)); |
1675 | m.impl("nan_to_num_" , |
1676 | TORCH_FN(wrapper_SparseCUDA__nan_to_num_)); |
1677 | m.impl("log1p" , |
1678 | TORCH_FN(wrapper_SparseCUDA__log1p)); |
1679 | m.impl("log1p.out" , |
1680 | TORCH_FN(wrapper_SparseCUDA_out_log1p_out)); |
1681 | m.impl("log1p_" , |
1682 | TORCH_FN(wrapper_SparseCUDA__log1p_)); |
1683 | m.impl("mm" , |
1684 | TORCH_FN(wrapper_SparseCUDA__mm)); |
1685 | m.impl("mm.out" , |
1686 | TORCH_FN(wrapper_SparseCUDA_out_mm_out)); |
1687 | m.impl("_sparse_sparse_matmul" , |
1688 | TORCH_FN(wrapper_SparseCUDA___sparse_sparse_matmul)); |
1689 | m.impl("mul.Tensor" , |
1690 | TORCH_FN(wrapper_SparseCUDA_Tensor_mul)); |
1691 | m.impl("mul.out" , |
1692 | TORCH_FN(wrapper_SparseCUDA_out_mul_out)); |
1693 | m.impl("mul_.Tensor" , |
1694 | TORCH_FN(wrapper_SparseCUDA_Tensor_mul_)); |
1695 | m.impl("mv" , |
1696 | TORCH_FN(wrapper_SparseCUDA__mv)); |
1697 | m.impl("narrow_copy" , |
1698 | TORCH_FN(wrapper_SparseCUDA__narrow_copy)); |
1699 | m.impl("permute" , |
1700 | TORCH_FN(wrapper_SparseCUDA__permute)); |
1701 | m.impl("rad2deg" , |
1702 | TORCH_FN(wrapper_SparseCUDA__rad2deg)); |
1703 | m.impl("rad2deg.out" , |
1704 | TORCH_FN(wrapper_SparseCUDA_out_rad2deg_out)); |
1705 | m.impl("rad2deg_" , |
1706 | TORCH_FN(wrapper_SparseCUDA__rad2deg_)); |
1707 | m.impl("deg2rad" , |
1708 | TORCH_FN(wrapper_SparseCUDA__deg2rad)); |
1709 | m.impl("deg2rad.out" , |
1710 | TORCH_FN(wrapper_SparseCUDA_out_deg2rad_out)); |
1711 | m.impl("deg2rad_" , |
1712 | TORCH_FN(wrapper_SparseCUDA__deg2rad_)); |
1713 | m.impl("neg" , |
1714 | TORCH_FN(wrapper_SparseCUDA__neg)); |
1715 | m.impl("neg.out" , |
1716 | TORCH_FN(wrapper_SparseCUDA_out_neg_out)); |
1717 | m.impl("neg_" , |
1718 | TORCH_FN(wrapper_SparseCUDA__neg_)); |
1719 | m.impl("round" , |
1720 | TORCH_FN(wrapper_SparseCUDA__round)); |
1721 | m.impl("round.out" , |
1722 | TORCH_FN(wrapper_SparseCUDA_out_round_out)); |
1723 | m.impl("round_" , |
1724 | TORCH_FN(wrapper_SparseCUDA__round_)); |
1725 | m.impl("relu" , |
1726 | TORCH_FN(wrapper_SparseCUDA__relu)); |
1727 | m.impl("relu_" , |
1728 | TORCH_FN(wrapper_SparseCUDA__relu_)); |
1729 | m.impl("sin" , |
1730 | TORCH_FN(wrapper_SparseCUDA__sin)); |
1731 | m.impl("sin.out" , |
1732 | TORCH_FN(wrapper_SparseCUDA_out_sin_out)); |
1733 | m.impl("sin_" , |
1734 | TORCH_FN(wrapper_SparseCUDA__sin_)); |
1735 | m.impl("sinh" , |
1736 | TORCH_FN(wrapper_SparseCUDA__sinh)); |
1737 | m.impl("sinh.out" , |
1738 | TORCH_FN(wrapper_SparseCUDA_out_sinh_out)); |
1739 | m.impl("sinh_" , |
1740 | TORCH_FN(wrapper_SparseCUDA__sinh_)); |
1741 | m.impl("sspaddmm.out" , |
1742 | TORCH_FN(wrapper_SparseCUDA_out_sspaddmm_out)); |
1743 | m.impl("sum" , |
1744 | TORCH_FN(wrapper_SparseCUDA__sum)); |
1745 | m.impl("sum.dim_IntList" , |
1746 | TORCH_FN(wrapper_SparseCUDA_dim_IntList_sum)); |
1747 | m.impl("sqrt" , |
1748 | TORCH_FN(wrapper_SparseCUDA__sqrt)); |
1749 | m.impl("sqrt.out" , |
1750 | TORCH_FN(wrapper_SparseCUDA_out_sqrt_out)); |
1751 | m.impl("sqrt_" , |
1752 | TORCH_FN(wrapper_SparseCUDA__sqrt_)); |
1753 | m.impl("tan" , |
1754 | TORCH_FN(wrapper_SparseCUDA__tan)); |
1755 | m.impl("tan.out" , |
1756 | TORCH_FN(wrapper_SparseCUDA_out_tan_out)); |
1757 | m.impl("tan_" , |
1758 | TORCH_FN(wrapper_SparseCUDA__tan_)); |
1759 | m.impl("tanh" , |
1760 | TORCH_FN(wrapper_SparseCUDA__tanh)); |
1761 | m.impl("tanh.out" , |
1762 | TORCH_FN(wrapper_SparseCUDA_out_tanh_out)); |
1763 | m.impl("tanh_" , |
1764 | TORCH_FN(wrapper_SparseCUDA__tanh_)); |
1765 | m.impl("threshold_backward" , |
1766 | TORCH_FN(wrapper_SparseCUDA__threshold_backward)); |
1767 | m.impl("threshold_backward.grad_input" , |
1768 | TORCH_FN(wrapper_SparseCUDA_grad_input_threshold_backward_out)); |
1769 | m.impl("trunc" , |
1770 | TORCH_FN(wrapper_SparseCUDA__trunc)); |
1771 | m.impl("trunc.out" , |
1772 | TORCH_FN(wrapper_SparseCUDA_out_trunc_out)); |
1773 | m.impl("trunc_" , |
1774 | TORCH_FN(wrapper_SparseCUDA__trunc_)); |
1775 | m.impl("unsqueeze" , |
1776 | TORCH_FN(wrapper_SparseCUDA__unsqueeze)); |
1777 | m.impl("zeros.out" , |
1778 | TORCH_FN(wrapper_SparseCUDA_out_zeros_out)); |
1779 | m.impl("native_norm" , |
1780 | TORCH_FN(wrapper_SparseCUDA__native_norm)); |
1781 | m.impl("native_norm.ScalarOpt_dim_dtype" , |
1782 | TORCH_FN(wrapper_SparseCUDA_ScalarOpt_dim_dtype_native_norm)); |
1783 | m.impl("_sparse_sum_backward" , |
1784 | TORCH_FN(wrapper_SparseCUDA___sparse_sum_backward)); |
1785 | m.impl("_sparse_softmax" , |
1786 | TORCH_FN(wrapper_SparseCUDA___sparse_softmax)); |
1787 | m.impl("_sparse_softmax_backward_data" , |
1788 | TORCH_FN(wrapper_SparseCUDA___sparse_softmax_backward_data)); |
1789 | m.impl("_sparse_log_softmax" , |
1790 | TORCH_FN(wrapper_SparseCUDA___sparse_log_softmax)); |
1791 | m.impl("_sparse_log_softmax_backward_data" , |
1792 | TORCH_FN(wrapper_SparseCUDA___sparse_log_softmax_backward_data)); |
1793 | m.impl("norm.ScalarOpt_dim_dtype" , |
1794 | TORCH_FN(wrapper_SparseCUDA_ScalarOpt_dim_dtype_norm)); |
1795 | m.impl("norm.ScalarOpt_dim" , |
1796 | TORCH_FN(wrapper_SparseCUDA_ScalarOpt_dim_norm)); |
1797 | m.impl("clone" , |
1798 | TORCH_FN(wrapper_SparseCUDA__clone)); |
1799 | m.impl("resize_as_sparse_" , |
1800 | TORCH_FN(wrapper_SparseCUDA__resize_as_sparse_)); |
1801 | m.impl("zero_" , |
1802 | TORCH_FN(wrapper_SparseCUDA__zero_)); |
1803 | m.impl("sub.Tensor" , |
1804 | TORCH_FN(wrapper_SparseCUDA_Tensor_sub)); |
1805 | m.impl("sub.out" , |
1806 | TORCH_FN(wrapper_SparseCUDA_out_sub_out)); |
1807 | m.impl("sub_.Tensor" , |
1808 | TORCH_FN(wrapper_SparseCUDA_Tensor_sub_)); |
1809 | m.impl("addmm" , |
1810 | TORCH_FN(wrapper_SparseCUDA__addmm)); |
1811 | m.impl("addmm.out" , |
1812 | TORCH_FN(wrapper_SparseCUDA_out_addmm_out)); |
1813 | m.impl("addmm_" , |
1814 | TORCH_FN(wrapper_SparseCUDA__addmm_)); |
1815 | m.impl("_sparse_coo_tensor_with_dims" , |
1816 | TORCH_FN(wrapper_SparseCUDA___sparse_coo_tensor_with_dims)); |
1817 | m.impl("_sparse_coo_tensor_with_dims_and_tensors" , |
1818 | TORCH_FN(wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors)); |
1819 | m.impl("sparse_resize_" , |
1820 | TORCH_FN(wrapper_SparseCUDA__sparse_resize_)); |
1821 | m.impl("sparse_resize_and_clear_" , |
1822 | TORCH_FN(wrapper_SparseCUDA__sparse_resize_and_clear_)); |
1823 | m.impl("sparse_mask" , |
1824 | TORCH_FN(wrapper_SparseCUDA__sparse_mask)); |
1825 | m.impl("_to_dense" , |
1826 | TORCH_FN(wrapper_SparseCUDA___to_dense)); |
1827 | m.impl("sparse_dim" , |
1828 | TORCH_FN(wrapper_SparseCUDA__sparse_dim)); |
1829 | m.impl("_dimI" , |
1830 | TORCH_FN(wrapper_SparseCUDA___dimI)); |
1831 | m.impl("dense_dim" , |
1832 | TORCH_FN(wrapper_SparseCUDA__dense_dim)); |
1833 | m.impl("_dimV" , |
1834 | TORCH_FN(wrapper_SparseCUDA___dimV)); |
1835 | m.impl("_nnz" , |
1836 | TORCH_FN(wrapper_SparseCUDA___nnz)); |
1837 | m.impl("_coalesce" , |
1838 | TORCH_FN(wrapper_SparseCUDA___coalesce)); |
1839 | m.impl("is_coalesced" , |
1840 | TORCH_FN(wrapper_SparseCUDA__is_coalesced)); |
1841 | m.impl("_indices" , |
1842 | TORCH_FN(wrapper_SparseCUDA___indices)); |
1843 | m.impl("_values" , |
1844 | TORCH_FN(wrapper_SparseCUDA___values)); |
1845 | m.impl("_coalesced_" , |
1846 | TORCH_FN(wrapper_SparseCUDA___coalesced_)); |
1847 | m.impl("indices" , |
1848 | TORCH_FN(wrapper_SparseCUDA__indices)); |
1849 | m.impl("values" , |
1850 | TORCH_FN(wrapper_SparseCUDA__values)); |
1851 | m.impl("hspmm" , |
1852 | TORCH_FN(wrapper_SparseCUDA__hspmm)); |
1853 | m.impl("hspmm.out" , |
1854 | TORCH_FN(wrapper_SparseCUDA_out_hspmm_out)); |
1855 | m.impl("copy_sparse_to_sparse_" , |
1856 | TORCH_FN(wrapper_SparseCUDA__copy_sparse_to_sparse_)); |
1857 | m.impl("to_sparse.sparse_dim" , |
1858 | TORCH_FN(wrapper_SparseCUDA_sparse_dim_to_sparse)); |
1859 | m.impl("to_sparse" , |
1860 | TORCH_FN(wrapper_SparseCUDA__to_sparse)); |
1861 | m.impl("to_sparse_csr" , |
1862 | TORCH_FN(wrapper_SparseCUDA__to_sparse_csr)); |
1863 | m.impl("to_sparse_csc" , |
1864 | TORCH_FN(wrapper_SparseCUDA__to_sparse_csc)); |
1865 | m.impl("to_sparse_bsr" , |
1866 | TORCH_FN(wrapper_SparseCUDA__to_sparse_bsr)); |
1867 | m.impl("to_sparse_bsc" , |
1868 | TORCH_FN(wrapper_SparseCUDA__to_sparse_bsc)); |
1869 | m.impl("index_select" , |
1870 | TORCH_FN(wrapper_SparseCUDA__index_select)); |
1871 | m.impl("erfinv" , |
1872 | TORCH_FN(wrapper_SparseCUDA__erfinv)); |
1873 | m.impl("erfinv.out" , |
1874 | TORCH_FN(wrapper_SparseCUDA_out_erfinv_out)); |
1875 | m.impl("erfinv_" , |
1876 | TORCH_FN(wrapper_SparseCUDA__erfinv_)); |
1877 | m.impl("sign" , |
1878 | TORCH_FN(wrapper_SparseCUDA__sign)); |
1879 | m.impl("sign.out" , |
1880 | TORCH_FN(wrapper_SparseCUDA_out_sign_out)); |
1881 | m.impl("sign_" , |
1882 | TORCH_FN(wrapper_SparseCUDA__sign_)); |
1883 | m.impl("signbit" , |
1884 | TORCH_FN(wrapper_SparseCUDA__signbit)); |
1885 | m.impl("signbit.out" , |
1886 | TORCH_FN(wrapper_SparseCUDA_out_signbit_out)); |
1887 | m.impl("any" , |
1888 | TORCH_FN(wrapper_SparseCUDA__any)); |
1889 | m.impl("pow.Tensor_Scalar" , |
1890 | TORCH_FN(wrapper_SparseCUDA_Tensor_Scalar_pow)); |
1891 | m.impl("pow.Tensor_Scalar_out" , |
1892 | TORCH_FN(wrapper_SparseCUDA_Tensor_Scalar_out_pow_out)); |
1893 | m.impl("isinf" , |
1894 | TORCH_FN(wrapper_SparseCUDA__isinf)); |
1895 | m.impl("isposinf" , |
1896 | TORCH_FN(wrapper_SparseCUDA__isposinf)); |
1897 | m.impl("isposinf.out" , |
1898 | TORCH_FN(wrapper_SparseCUDA_out_isposinf_out)); |
1899 | m.impl("isneginf" , |
1900 | TORCH_FN(wrapper_SparseCUDA__isneginf)); |
1901 | m.impl("isneginf.out" , |
1902 | TORCH_FN(wrapper_SparseCUDA_out_isneginf_out)); |
1903 | }; |
1904 | } // anonymous namespace |
1905 | namespace sparsecuda { |
1906 | at::Tensor abs(const at::Tensor & self) { |
1907 | return wrapper_SparseCUDA__abs(self); |
1908 | } |
1909 | at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) { |
1910 | return wrapper_SparseCUDA_out_abs_out(self, out); |
1911 | } |
1912 | at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) { |
1913 | return wrapper_SparseCUDA_out_abs_out(self, out); |
1914 | } |
1915 | at::Tensor & abs_(at::Tensor & self) { |
1916 | return wrapper_SparseCUDA__abs_(self); |
1917 | } |
1918 | at::Tensor sgn(const at::Tensor & self) { |
1919 | return wrapper_SparseCUDA__sgn(self); |
1920 | } |
1921 | at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) { |
1922 | return wrapper_SparseCUDA_out_sgn_out(self, out); |
1923 | } |
1924 | at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) { |
1925 | return wrapper_SparseCUDA_out_sgn_out(self, out); |
1926 | } |
1927 | at::Tensor & sgn_(at::Tensor & self) { |
1928 | return wrapper_SparseCUDA__sgn_(self); |
1929 | } |
1930 | at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) { |
1931 | return wrapper_SparseCUDA_out_conj_physical_out(self, out); |
1932 | } |
1933 | at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) { |
1934 | return wrapper_SparseCUDA_out_conj_physical_out(self, out); |
1935 | } |
1936 | at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1937 | return wrapper_SparseCUDA_Tensor_add(self, other, alpha); |
1938 | } |
1939 | at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1940 | return wrapper_SparseCUDA_out_add_out(self, other, alpha, out); |
1941 | } |
1942 | at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
1943 | return wrapper_SparseCUDA_out_add_out(self, other, alpha, out); |
1944 | } |
1945 | at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1946 | return wrapper_SparseCUDA_Tensor_add_(self, other, alpha); |
1947 | } |
1948 | at::Tensor asinh(const at::Tensor & self) { |
1949 | return wrapper_SparseCUDA__asinh(self); |
1950 | } |
1951 | at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) { |
1952 | return wrapper_SparseCUDA_out_asinh_out(self, out); |
1953 | } |
1954 | at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) { |
1955 | return wrapper_SparseCUDA_out_asinh_out(self, out); |
1956 | } |
1957 | at::Tensor & asinh_(at::Tensor & self) { |
1958 | return wrapper_SparseCUDA__asinh_(self); |
1959 | } |
1960 | at::Tensor atanh(const at::Tensor & self) { |
1961 | return wrapper_SparseCUDA__atanh(self); |
1962 | } |
1963 | at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) { |
1964 | return wrapper_SparseCUDA_out_atanh_out(self, out); |
1965 | } |
1966 | at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) { |
1967 | return wrapper_SparseCUDA_out_atanh_out(self, out); |
1968 | } |
1969 | at::Tensor & atanh_(at::Tensor & self) { |
1970 | return wrapper_SparseCUDA__atanh_(self); |
1971 | } |
1972 | at::Tensor asin(const at::Tensor & self) { |
1973 | return wrapper_SparseCUDA__asin(self); |
1974 | } |
1975 | at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) { |
1976 | return wrapper_SparseCUDA_out_asin_out(self, out); |
1977 | } |
1978 | at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) { |
1979 | return wrapper_SparseCUDA_out_asin_out(self, out); |
1980 | } |
1981 | at::Tensor & asin_(at::Tensor & self) { |
1982 | return wrapper_SparseCUDA__asin_(self); |
1983 | } |
1984 | at::Tensor atan(const at::Tensor & self) { |
1985 | return wrapper_SparseCUDA__atan(self); |
1986 | } |
1987 | at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) { |
1988 | return wrapper_SparseCUDA_out_atan_out(self, out); |
1989 | } |
1990 | at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) { |
1991 | return wrapper_SparseCUDA_out_atan_out(self, out); |
1992 | } |
1993 | at::Tensor & atan_(at::Tensor & self) { |
1994 | return wrapper_SparseCUDA__atan_(self); |
1995 | } |
1996 | at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) { |
1997 | return wrapper_SparseCUDA__bmm(self, mat2); |
1998 | } |
1999 | at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) { |
2000 | return wrapper_SparseCUDA_out_bmm_out(self, mat2, out); |
2001 | } |
2002 | at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
2003 | return wrapper_SparseCUDA_out_bmm_out(self, mat2, out); |
2004 | } |
2005 | at::Tensor _sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) { |
2006 | return wrapper_SparseCUDA___sparse_broadcast_to(self, size); |
2007 | } |
2008 | at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim) { |
2009 | return wrapper_SparseCUDA__cat(tensors, dim); |
2010 | } |
2011 | at::Tensor ceil(const at::Tensor & self) { |
2012 | return wrapper_SparseCUDA__ceil(self); |
2013 | } |
2014 | at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) { |
2015 | return wrapper_SparseCUDA_out_ceil_out(self, out); |
2016 | } |
2017 | at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) { |
2018 | return wrapper_SparseCUDA_out_ceil_out(self, out); |
2019 | } |
2020 | at::Tensor & ceil_(at::Tensor & self) { |
2021 | return wrapper_SparseCUDA__ceil_(self); |
2022 | } |
2023 | at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) { |
2024 | return wrapper_SparseCUDA__copy_(self, src, non_blocking); |
2025 | } |
2026 | at::Tensor div(const at::Tensor & self, const at::Tensor & other) { |
2027 | return wrapper_SparseCUDA_Tensor_div(self, other); |
2028 | } |
2029 | at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
2030 | return wrapper_SparseCUDA_out_div_out(self, other, out); |
2031 | } |
2032 | at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
2033 | return wrapper_SparseCUDA_out_div_out(self, other, out); |
2034 | } |
2035 | at::Tensor & div_(at::Tensor & self, const at::Tensor & other) { |
2036 | return wrapper_SparseCUDA_Tensor_div_(self, other); |
2037 | } |
2038 | at::Tensor div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { |
2039 | return wrapper_SparseCUDA_Tensor_mode_div(self, other, rounding_mode); |
2040 | } |
2041 | at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { |
2042 | return wrapper_SparseCUDA_out_mode_div_out(self, other, rounding_mode, out); |
2043 | } |
2044 | at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) { |
2045 | return wrapper_SparseCUDA_out_mode_div_out(self, other, rounding_mode, out); |
2046 | } |
2047 | at::Tensor & div_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { |
2048 | return wrapper_SparseCUDA_Tensor_mode_div_(self, other, rounding_mode); |
2049 | } |
2050 | at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
2051 | return wrapper_SparseCUDA_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
2052 | } |
2053 | at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
2054 | return wrapper_SparseCUDA_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format); |
2055 | } |
2056 | at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
2057 | return wrapper_SparseCUDA_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
2058 | } |
2059 | at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
2060 | return wrapper_SparseCUDA_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format); |
2061 | } |
2062 | at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
2063 | return wrapper_SparseCUDA__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
2064 | } |
2065 | at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
2066 | return wrapper_SparseCUDA__empty_like(self, dtype, layout, device, pin_memory, memory_format); |
2067 | } |
2068 | at::Tensor erf(const at::Tensor & self) { |
2069 | return wrapper_SparseCUDA__erf(self); |
2070 | } |
2071 | at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) { |
2072 | return wrapper_SparseCUDA_out_erf_out(self, out); |
2073 | } |
2074 | at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) { |
2075 | return wrapper_SparseCUDA_out_erf_out(self, out); |
2076 | } |
2077 | at::Tensor & erf_(at::Tensor & self) { |
2078 | return wrapper_SparseCUDA__erf_(self); |
2079 | } |
2080 | at::Tensor expm1(const at::Tensor & self) { |
2081 | return wrapper_SparseCUDA__expm1(self); |
2082 | } |
2083 | at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) { |
2084 | return wrapper_SparseCUDA_out_expm1_out(self, out); |
2085 | } |
2086 | at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) { |
2087 | return wrapper_SparseCUDA_out_expm1_out(self, out); |
2088 | } |
2089 | at::Tensor & expm1_(at::Tensor & self) { |
2090 | return wrapper_SparseCUDA__expm1_(self); |
2091 | } |
2092 | at::Tensor floor(const at::Tensor & self) { |
2093 | return wrapper_SparseCUDA__floor(self); |
2094 | } |
2095 | at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) { |
2096 | return wrapper_SparseCUDA_out_floor_out(self, out); |
2097 | } |
2098 | at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) { |
2099 | return wrapper_SparseCUDA_out_floor_out(self, out); |
2100 | } |
2101 | at::Tensor & floor_(at::Tensor & self) { |
2102 | return wrapper_SparseCUDA__floor_(self); |
2103 | } |
2104 | at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other) { |
2105 | return wrapper_SparseCUDA__floor_divide(self, other); |
2106 | } |
2107 | at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
2108 | return wrapper_SparseCUDA_out_floor_divide_out(self, other, out); |
2109 | } |
2110 | at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
2111 | return wrapper_SparseCUDA_out_floor_divide_out(self, other, out); |
2112 | } |
2113 | at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other) { |
2114 | return wrapper_SparseCUDA_Tensor_floor_divide_(self, other); |
2115 | } |
2116 | at::Tensor frac(const at::Tensor & self) { |
2117 | return wrapper_SparseCUDA__frac(self); |
2118 | } |
2119 | at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) { |
2120 | return wrapper_SparseCUDA_out_frac_out(self, out); |
2121 | } |
2122 | at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) { |
2123 | return wrapper_SparseCUDA_out_frac_out(self, out); |
2124 | } |
2125 | at::Tensor & frac_(at::Tensor & self) { |
2126 | return wrapper_SparseCUDA__frac_(self); |
2127 | } |
2128 | at::Tensor isnan(const at::Tensor & self) { |
2129 | return wrapper_SparseCUDA__isnan(self); |
2130 | } |
2131 | at::Tensor nan_to_num(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) { |
2132 | return wrapper_SparseCUDA__nan_to_num(self, nan, posinf, neginf); |
2133 | } |
2134 | at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) { |
2135 | return wrapper_SparseCUDA_out_nan_to_num_out(self, nan, posinf, neginf, out); |
2136 | } |
2137 | at::Tensor & nan_to_num_outf(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) { |
2138 | return wrapper_SparseCUDA_out_nan_to_num_out(self, nan, posinf, neginf, out); |
2139 | } |
2140 | at::Tensor & nan_to_num_(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) { |
2141 | return wrapper_SparseCUDA__nan_to_num_(self, nan, posinf, neginf); |
2142 | } |
2143 | at::Tensor log1p(const at::Tensor & self) { |
2144 | return wrapper_SparseCUDA__log1p(self); |
2145 | } |
2146 | at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) { |
2147 | return wrapper_SparseCUDA_out_log1p_out(self, out); |
2148 | } |
2149 | at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) { |
2150 | return wrapper_SparseCUDA_out_log1p_out(self, out); |
2151 | } |
2152 | at::Tensor & log1p_(at::Tensor & self) { |
2153 | return wrapper_SparseCUDA__log1p_(self); |
2154 | } |
2155 | at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) { |
2156 | return wrapper_SparseCUDA__mm(self, mat2); |
2157 | } |
2158 | at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) { |
2159 | return wrapper_SparseCUDA_out_mm_out(self, mat2, out); |
2160 | } |
2161 | at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
2162 | return wrapper_SparseCUDA_out_mm_out(self, mat2, out); |
2163 | } |
2164 | at::Tensor _sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) { |
2165 | return wrapper_SparseCUDA___sparse_sparse_matmul(self, other); |
2166 | } |
2167 | at::Tensor mul(const at::Tensor & self, const at::Tensor & other) { |
2168 | return wrapper_SparseCUDA_Tensor_mul(self, other); |
2169 | } |
2170 | at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
2171 | return wrapper_SparseCUDA_out_mul_out(self, other, out); |
2172 | } |
2173 | at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
2174 | return wrapper_SparseCUDA_out_mul_out(self, other, out); |
2175 | } |
2176 | at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) { |
2177 | return wrapper_SparseCUDA_Tensor_mul_(self, other); |
2178 | } |
2179 | at::Tensor mv(const at::Tensor & self, const at::Tensor & vec) { |
2180 | return wrapper_SparseCUDA__mv(self, vec); |
2181 | } |
2182 | at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) { |
2183 | return wrapper_SparseCUDA__narrow_copy(self, dim, start, length); |
2184 | } |
2185 | at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { |
2186 | return wrapper_SparseCUDA__narrow_copy(self, dim, start, length); |
2187 | } |
2188 | at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims) { |
2189 | return wrapper_SparseCUDA__permute(self, dims); |
2190 | } |
2191 | at::Tensor rad2deg(const at::Tensor & self) { |
2192 | return wrapper_SparseCUDA__rad2deg(self); |
2193 | } |
2194 | at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) { |
2195 | return wrapper_SparseCUDA_out_rad2deg_out(self, out); |
2196 | } |
2197 | at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) { |
2198 | return wrapper_SparseCUDA_out_rad2deg_out(self, out); |
2199 | } |
2200 | at::Tensor & rad2deg_(at::Tensor & self) { |
2201 | return wrapper_SparseCUDA__rad2deg_(self); |
2202 | } |
2203 | at::Tensor deg2rad(const at::Tensor & self) { |
2204 | return wrapper_SparseCUDA__deg2rad(self); |
2205 | } |
2206 | at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) { |
2207 | return wrapper_SparseCUDA_out_deg2rad_out(self, out); |
2208 | } |
2209 | at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) { |
2210 | return wrapper_SparseCUDA_out_deg2rad_out(self, out); |
2211 | } |
2212 | at::Tensor & deg2rad_(at::Tensor & self) { |
2213 | return wrapper_SparseCUDA__deg2rad_(self); |
2214 | } |
2215 | at::Tensor neg(const at::Tensor & self) { |
2216 | return wrapper_SparseCUDA__neg(self); |
2217 | } |
2218 | at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) { |
2219 | return wrapper_SparseCUDA_out_neg_out(self, out); |
2220 | } |
2221 | at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) { |
2222 | return wrapper_SparseCUDA_out_neg_out(self, out); |
2223 | } |
2224 | at::Tensor & neg_(at::Tensor & self) { |
2225 | return wrapper_SparseCUDA__neg_(self); |
2226 | } |
2227 | at::Tensor round(const at::Tensor & self) { |
2228 | return wrapper_SparseCUDA__round(self); |
2229 | } |
2230 | at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) { |
2231 | return wrapper_SparseCUDA_out_round_out(self, out); |
2232 | } |
2233 | at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) { |
2234 | return wrapper_SparseCUDA_out_round_out(self, out); |
2235 | } |
2236 | at::Tensor & round_(at::Tensor & self) { |
2237 | return wrapper_SparseCUDA__round_(self); |
2238 | } |
2239 | at::Tensor relu(const at::Tensor & self) { |
2240 | return wrapper_SparseCUDA__relu(self); |
2241 | } |
2242 | at::Tensor & relu_(at::Tensor & self) { |
2243 | return wrapper_SparseCUDA__relu_(self); |
2244 | } |
2245 | at::Tensor sin(const at::Tensor & self) { |
2246 | return wrapper_SparseCUDA__sin(self); |
2247 | } |
2248 | at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) { |
2249 | return wrapper_SparseCUDA_out_sin_out(self, out); |
2250 | } |
2251 | at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) { |
2252 | return wrapper_SparseCUDA_out_sin_out(self, out); |
2253 | } |
2254 | at::Tensor & sin_(at::Tensor & self) { |
2255 | return wrapper_SparseCUDA__sin_(self); |
2256 | } |
2257 | at::Tensor sinh(const at::Tensor & self) { |
2258 | return wrapper_SparseCUDA__sinh(self); |
2259 | } |
2260 | at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) { |
2261 | return wrapper_SparseCUDA_out_sinh_out(self, out); |
2262 | } |
2263 | at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) { |
2264 | return wrapper_SparseCUDA_out_sinh_out(self, out); |
2265 | } |
2266 | at::Tensor & sinh_(at::Tensor & self) { |
2267 | return wrapper_SparseCUDA__sinh_(self); |
2268 | } |
2269 | at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
2270 | return wrapper_SparseCUDA_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out); |
2271 | } |
2272 | at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
2273 | return wrapper_SparseCUDA_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out); |
2274 | } |
2275 | at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) { |
2276 | return wrapper_SparseCUDA__sum(self, dtype); |
2277 | } |
2278 | at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
2279 | return wrapper_SparseCUDA_dim_IntList_sum(self, dim, keepdim, dtype); |
2280 | } |
2281 | at::Tensor sqrt(const at::Tensor & self) { |
2282 | return wrapper_SparseCUDA__sqrt(self); |
2283 | } |
2284 | at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) { |
2285 | return wrapper_SparseCUDA_out_sqrt_out(self, out); |
2286 | } |
2287 | at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) { |
2288 | return wrapper_SparseCUDA_out_sqrt_out(self, out); |
2289 | } |
2290 | at::Tensor & sqrt_(at::Tensor & self) { |
2291 | return wrapper_SparseCUDA__sqrt_(self); |
2292 | } |
2293 | at::Tensor tan(const at::Tensor & self) { |
2294 | return wrapper_SparseCUDA__tan(self); |
2295 | } |
2296 | at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) { |
2297 | return wrapper_SparseCUDA_out_tan_out(self, out); |
2298 | } |
2299 | at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) { |
2300 | return wrapper_SparseCUDA_out_tan_out(self, out); |
2301 | } |
2302 | at::Tensor & tan_(at::Tensor & self) { |
2303 | return wrapper_SparseCUDA__tan_(self); |
2304 | } |
2305 | at::Tensor tanh(const at::Tensor & self) { |
2306 | return wrapper_SparseCUDA__tanh(self); |
2307 | } |
2308 | at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) { |
2309 | return wrapper_SparseCUDA_out_tanh_out(self, out); |
2310 | } |
2311 | at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) { |
2312 | return wrapper_SparseCUDA_out_tanh_out(self, out); |
2313 | } |
2314 | at::Tensor & tanh_(at::Tensor & self) { |
2315 | return wrapper_SparseCUDA__tanh_(self); |
2316 | } |
2317 | at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { |
2318 | return wrapper_SparseCUDA__threshold_backward(grad_output, self, threshold); |
2319 | } |
2320 | at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { |
2321 | return wrapper_SparseCUDA_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input); |
2322 | } |
2323 | at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) { |
2324 | return wrapper_SparseCUDA_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input); |
2325 | } |
2326 | at::Tensor trunc(const at::Tensor & self) { |
2327 | return wrapper_SparseCUDA__trunc(self); |
2328 | } |
2329 | at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) { |
2330 | return wrapper_SparseCUDA_out_trunc_out(self, out); |
2331 | } |
2332 | at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) { |
2333 | return wrapper_SparseCUDA_out_trunc_out(self, out); |
2334 | } |
2335 | at::Tensor & trunc_(at::Tensor & self) { |
2336 | return wrapper_SparseCUDA__trunc_(self); |
2337 | } |
2338 | at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) { |
2339 | return wrapper_SparseCUDA__unsqueeze(self, dim); |
2340 | } |
2341 | at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) { |
2342 | return wrapper_SparseCUDA_out_zeros_out(c10::fromIntArrayRefSlow(size), out); |
2343 | } |
2344 | at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) { |
2345 | return wrapper_SparseCUDA_out_zeros_out(c10::fromIntArrayRefSlow(size), out); |
2346 | } |
2347 | at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size) { |
2348 | return wrapper_SparseCUDA_out_zeros_out(size, out); |
2349 | } |
2350 | at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) { |
2351 | return wrapper_SparseCUDA_out_zeros_out(size, out); |
2352 | } |
2353 | at::Tensor native_norm(const at::Tensor & self, const at::Scalar & p) { |
2354 | return wrapper_SparseCUDA__native_norm(self, p); |
2355 | } |
2356 | at::Tensor native_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
2357 | return wrapper_SparseCUDA_ScalarOpt_dim_dtype_native_norm(self, p, dim, keepdim, dtype); |
2358 | } |
2359 | at::Tensor _sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) { |
2360 | return wrapper_SparseCUDA___sparse_sum_backward(grad, self, dim); |
2361 | } |
2362 | at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) { |
2363 | return wrapper_SparseCUDA___sparse_softmax(self, dim, half_to_float); |
2364 | } |
2365 | at::Tensor _sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { |
2366 | return wrapper_SparseCUDA___sparse_softmax_backward_data(grad_output, output, dim, self); |
2367 | } |
2368 | at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) { |
2369 | return wrapper_SparseCUDA___sparse_log_softmax(self, dim, half_to_float); |
2370 | } |
2371 | at::Tensor _sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { |
2372 | return wrapper_SparseCUDA___sparse_log_softmax_backward_data(grad_output, output, dim, self); |
2373 | } |
2374 | at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) { |
2375 | return wrapper_SparseCUDA_ScalarOpt_dim_dtype_norm(self, p, dim, keepdim, dtype); |
2376 | } |
2377 | at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) { |
2378 | return wrapper_SparseCUDA_ScalarOpt_dim_norm(self, p, dim, keepdim); |
2379 | } |
2380 | at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) { |
2381 | return wrapper_SparseCUDA__clone(self, memory_format); |
2382 | } |
2383 | const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) { |
2384 | return wrapper_SparseCUDA__resize_as_sparse_(self, the_template); |
2385 | } |
2386 | at::Tensor & zero_(at::Tensor & self) { |
2387 | return wrapper_SparseCUDA__zero_(self); |
2388 | } |
2389 | at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
2390 | return wrapper_SparseCUDA_Tensor_sub(self, other, alpha); |
2391 | } |
2392 | at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
2393 | return wrapper_SparseCUDA_out_sub_out(self, other, alpha, out); |
2394 | } |
2395 | at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
2396 | return wrapper_SparseCUDA_out_sub_out(self, other, alpha, out); |
2397 | } |
2398 | at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
2399 | return wrapper_SparseCUDA_Tensor_sub_(self, other, alpha); |
2400 | } |
2401 | at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
2402 | return wrapper_SparseCUDA__addmm(self, mat1, mat2, beta, alpha); |
2403 | } |
2404 | at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
2405 | return wrapper_SparseCUDA_out_addmm_out(self, mat1, mat2, beta, alpha, out); |
2406 | } |
2407 | at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
2408 | return wrapper_SparseCUDA_out_addmm_out(self, mat1, mat2, beta, alpha, out); |
2409 | } |
2410 | at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
2411 | return wrapper_SparseCUDA__addmm_(self, mat1, mat2, beta, alpha); |
2412 | } |
2413 | at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) { |
2414 | return wrapper_SparseCUDA___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
2415 | } |
2416 | at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
2417 | return wrapper_SparseCUDA___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory); |
2418 | } |
2419 | at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) { |
2420 | return wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
2421 | } |
2422 | at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
2423 | return wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory); |
2424 | } |
2425 | at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) { |
2426 | return wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
2427 | } |
2428 | at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
2429 | return wrapper_SparseCUDA___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory); |
2430 | } |
2431 | const at::Tensor & sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { |
2432 | return wrapper_SparseCUDA__sparse_resize_(self, size, sparse_dim, dense_dim); |
2433 | } |
2434 | const at::Tensor & sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { |
2435 | return wrapper_SparseCUDA__sparse_resize_and_clear_(self, size, sparse_dim, dense_dim); |
2436 | } |
2437 | at::Tensor sparse_mask(const at::Tensor & self, const at::Tensor & mask) { |
2438 | return wrapper_SparseCUDA__sparse_mask(self, mask); |
2439 | } |
2440 | at::Tensor _to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) { |
2441 | return wrapper_SparseCUDA___to_dense(self, dtype); |
2442 | } |
2443 | int64_t sparse_dim(const at::Tensor & self) { |
2444 | return wrapper_SparseCUDA__sparse_dim(self); |
2445 | } |
2446 | int64_t _dimI(const at::Tensor & self) { |
2447 | return wrapper_SparseCUDA___dimI(self); |
2448 | } |
2449 | int64_t dense_dim(const at::Tensor & self) { |
2450 | return wrapper_SparseCUDA__dense_dim(self); |
2451 | } |
2452 | int64_t _dimV(const at::Tensor & self) { |
2453 | return wrapper_SparseCUDA___dimV(self); |
2454 | } |
2455 | int64_t _nnz(const at::Tensor & self) { |
2456 | return wrapper_SparseCUDA___nnz(self); |
2457 | } |
2458 | at::Tensor _coalesce(const at::Tensor & self) { |
2459 | return wrapper_SparseCUDA___coalesce(self); |
2460 | } |
2461 | bool is_coalesced(const at::Tensor & self) { |
2462 | return wrapper_SparseCUDA__is_coalesced(self); |
2463 | } |
2464 | at::Tensor _indices(const at::Tensor & self) { |
2465 | return wrapper_SparseCUDA___indices(self); |
2466 | } |
2467 | at::Tensor _values(const at::Tensor & self) { |
2468 | return wrapper_SparseCUDA___values(self); |
2469 | } |
2470 | at::Tensor & _coalesced_(at::Tensor & self, bool coalesced) { |
2471 | return wrapper_SparseCUDA___coalesced_(self, coalesced); |
2472 | } |
2473 | at::Tensor indices(const at::Tensor & self) { |
2474 | return wrapper_SparseCUDA__indices(self); |
2475 | } |
2476 | at::Tensor values(const at::Tensor & self) { |
2477 | return wrapper_SparseCUDA__values(self); |
2478 | } |
2479 | at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) { |
2480 | return wrapper_SparseCUDA__hspmm(mat1, mat2); |
2481 | } |
2482 | at::Tensor & hspmm_out(at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) { |
2483 | return wrapper_SparseCUDA_out_hspmm_out(mat1, mat2, out); |
2484 | } |
2485 | at::Tensor & hspmm_outf(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) { |
2486 | return wrapper_SparseCUDA_out_hspmm_out(mat1, mat2, out); |
2487 | } |
2488 | at::Tensor & copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) { |
2489 | return wrapper_SparseCUDA__copy_sparse_to_sparse_(self, src, non_blocking); |
2490 | } |
2491 | at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) { |
2492 | return wrapper_SparseCUDA_sparse_dim_to_sparse(self, sparse_dim); |
2493 | } |
2494 | at::Tensor to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
2495 | return wrapper_SparseCUDA__to_sparse(self, layout, blocksize, dense_dim); |
2496 | } |
2497 | at::Tensor to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) { |
2498 | return wrapper_SparseCUDA__to_sparse_csr(self, dense_dim); |
2499 | } |
2500 | at::Tensor to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) { |
2501 | return wrapper_SparseCUDA__to_sparse_csc(self, dense_dim); |
2502 | } |
2503 | at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
2504 | return wrapper_SparseCUDA__to_sparse_bsr(self, blocksize, dense_dim); |
2505 | } |
2506 | at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
2507 | return wrapper_SparseCUDA__to_sparse_bsc(self, blocksize, dense_dim); |
2508 | } |
2509 | at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) { |
2510 | return wrapper_SparseCUDA__index_select(self, dim, index); |
2511 | } |
2512 | at::Tensor erfinv(const at::Tensor & self) { |
2513 | return wrapper_SparseCUDA__erfinv(self); |
2514 | } |
2515 | at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) { |
2516 | return wrapper_SparseCUDA_out_erfinv_out(self, out); |
2517 | } |
2518 | at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) { |
2519 | return wrapper_SparseCUDA_out_erfinv_out(self, out); |
2520 | } |
2521 | at::Tensor & erfinv_(at::Tensor & self) { |
2522 | return wrapper_SparseCUDA__erfinv_(self); |
2523 | } |
2524 | at::Tensor sign(const at::Tensor & self) { |
2525 | return wrapper_SparseCUDA__sign(self); |
2526 | } |
2527 | at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) { |
2528 | return wrapper_SparseCUDA_out_sign_out(self, out); |
2529 | } |
2530 | at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) { |
2531 | return wrapper_SparseCUDA_out_sign_out(self, out); |
2532 | } |
2533 | at::Tensor & sign_(at::Tensor & self) { |
2534 | return wrapper_SparseCUDA__sign_(self); |
2535 | } |
2536 | at::Tensor signbit(const at::Tensor & self) { |
2537 | return wrapper_SparseCUDA__signbit(self); |
2538 | } |
2539 | at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) { |
2540 | return wrapper_SparseCUDA_out_signbit_out(self, out); |
2541 | } |
2542 | at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) { |
2543 | return wrapper_SparseCUDA_out_signbit_out(self, out); |
2544 | } |
2545 | at::Tensor any(const at::Tensor & self) { |
2546 | return wrapper_SparseCUDA__any(self); |
2547 | } |
2548 | at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) { |
2549 | return wrapper_SparseCUDA_Tensor_Scalar_pow(self, exponent); |
2550 | } |
2551 | at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) { |
2552 | return wrapper_SparseCUDA_Tensor_Scalar_out_pow_out(self, exponent, out); |
2553 | } |
2554 | at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { |
2555 | return wrapper_SparseCUDA_Tensor_Scalar_out_pow_out(self, exponent, out); |
2556 | } |
2557 | at::Tensor isinf(const at::Tensor & self) { |
2558 | return wrapper_SparseCUDA__isinf(self); |
2559 | } |
2560 | at::Tensor isposinf(const at::Tensor & self) { |
2561 | return wrapper_SparseCUDA__isposinf(self); |
2562 | } |
2563 | at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) { |
2564 | return wrapper_SparseCUDA_out_isposinf_out(self, out); |
2565 | } |
2566 | at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) { |
2567 | return wrapper_SparseCUDA_out_isposinf_out(self, out); |
2568 | } |
2569 | at::Tensor isneginf(const at::Tensor & self) { |
2570 | return wrapper_SparseCUDA__isneginf(self); |
2571 | } |
2572 | at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) { |
2573 | return wrapper_SparseCUDA_out_isneginf_out(self, out); |
2574 | } |
2575 | at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) { |
2576 | return wrapper_SparseCUDA_out_isneginf_out(self, out); |
2577 | } |
2578 | } // namespace sparsecuda |
2579 | } // namespace at |
2580 | |