1 | // required for old g++ to compile PRId64 macros, see |
2 | // https://github.com/pytorch/pytorch/issues/3571 |
3 | // for context |
4 | #ifndef __STDC_FORMAT_MACROS |
5 | #define __STDC_FORMAT_MACROS |
6 | #endif |
7 | |
8 | // an external backend might generate file within its code tree |
9 | // and check all the source files within the tree with clang-format. |
10 | // so, disable it since the backend might have a different config. |
11 | // clang-format off |
12 | |
13 | // NOTE: This condition is true for all PyTorch internal libraries, it |
14 | // just excludes external projects such as torch_xla which |
15 | // re-use some of the PyTorch codegen machinery. |
16 | #if defined(CAFFE2_BUILD_MAIN_LIB) || \ |
17 | defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ |
18 | defined(TORCH_HIP_BUILD_MAIN_LIB) || \ |
19 | defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ |
20 | defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) |
21 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
22 | #endif |
23 | |
24 | // @generated by torchgen/gen.py from RegisterDispatchKey.cpp |
25 | |
26 | #include <c10/core/TensorImpl.h> |
27 | #include <c10/core/Allocator.h> |
28 | #include <ATen/DeviceGuard.h> |
29 | #include <ATen/NamedTensorUtils.h> |
30 | #include <ATen/Utils.h> |
31 | #include <ATen/WrapDimUtils.h> |
32 | #include <ATen/Dispatch.h> |
33 | #include <c10/util/ExclusivelyOwned.h> |
34 | #include <c10/util/Half.h> |
35 | #include <c10/core/UndefinedTensorImpl.h> |
36 | #include <c10/util/Optional.h> |
37 | #include <ATen/Tensor.h> |
38 | #include <ATen/native/Resize.h> |
39 | |
40 | #include <cstddef> |
41 | #include <functional> |
42 | #include <memory> |
43 | #include <utility> |
44 | |
45 | #include <ATen/Config.h> |
46 | #include <ATen/core/op_registration/adaption.h> |
47 | #include <torch/library.h> |
48 | #include <c10/cuda/CUDAGuard.h> |
49 | #include <ATen/cuda/ATenCUDAGeneral.h> |
50 | #include <ATen/cuda/CUDADevice.h> |
51 | #include <ATen/cuda/CUDAContext.h> |
52 | |
53 | #include <ATen/ops/as_strided_native.h> |
54 | #include <ATen/ops/empty.h> |
55 | #include <ATen/ops/empty_strided.h> |
56 | #include <ATen/ops/_copy_from_and_resize.h> |
57 | #include <ATen/ops/_copy_from.h> |
58 | #include <ATen/ops/_conj_physical_native.h> |
59 | #include <ATen/ops/_nnz_native.h> |
60 | #include <ATen/ops/_sparse_csr_prod_native.h> |
61 | #include <ATen/ops/_sparse_csr_sum_native.h> |
62 | #include <ATen/ops/_to_dense_native.h> |
63 | #include <ATen/ops/abs_native.h> |
64 | #include <ATen/ops/add_native.h> |
65 | #include <ATen/ops/addmm_native.h> |
66 | #include <ATen/ops/addmv_native.h> |
67 | #include <ATen/ops/angle_native.h> |
68 | #include <ATen/ops/asin_native.h> |
69 | #include <ATen/ops/asinh_native.h> |
70 | #include <ATen/ops/atan_native.h> |
71 | #include <ATen/ops/atanh_native.h> |
72 | #include <ATen/ops/baddbmm_native.h> |
73 | #include <ATen/ops/bmm_native.h> |
74 | #include <ATen/ops/ccol_indices_native.h> |
75 | #include <ATen/ops/ceil_native.h> |
76 | #include <ATen/ops/clone_native.h> |
77 | #include <ATen/ops/col_indices_native.h> |
78 | #include <ATen/ops/conj_physical_native.h> |
79 | #include <ATen/ops/copy_native.h> |
80 | #include <ATen/ops/crow_indices_native.h> |
81 | #include <ATen/ops/deg2rad_native.h> |
82 | #include <ATen/ops/dense_dim_native.h> |
83 | #include <ATen/ops/empty_like_native.h> |
84 | #include <ATen/ops/empty_native.h> |
85 | #include <ATen/ops/erf_native.h> |
86 | #include <ATen/ops/erfinv_native.h> |
87 | #include <ATen/ops/expm1_native.h> |
88 | #include <ATen/ops/fill_native.h> |
89 | #include <ATen/ops/floor_native.h> |
90 | #include <ATen/ops/frac_native.h> |
91 | #include <ATen/ops/isinf_native.h> |
92 | #include <ATen/ops/isnan_native.h> |
93 | #include <ATen/ops/isneginf_native.h> |
94 | #include <ATen/ops/isposinf_native.h> |
95 | #include <ATen/ops/log1p_native.h> |
96 | #include <ATen/ops/mm_native.h> |
97 | #include <ATen/ops/mul_native.h> |
98 | #include <ATen/ops/neg_native.h> |
99 | #include <ATen/ops/normal_native.h> |
100 | #include <ATen/ops/rad2deg_native.h> |
101 | #include <ATen/ops/relu_native.h> |
102 | #include <ATen/ops/resize_as_sparse_native.h> |
103 | #include <ATen/ops/resize_native.h> |
104 | #include <ATen/ops/round_native.h> |
105 | #include <ATen/ops/row_indices_native.h> |
106 | #include <ATen/ops/select_copy_native.h> |
107 | #include <ATen/ops/select_native.h> |
108 | #include <ATen/ops/sgn_native.h> |
109 | #include <ATen/ops/sign_native.h> |
110 | #include <ATen/ops/signbit_native.h> |
111 | #include <ATen/ops/sin_native.h> |
112 | #include <ATen/ops/sinh_native.h> |
113 | #include <ATen/ops/sparse_dim_native.h> |
114 | #include <ATen/ops/sparse_mask_native.h> |
115 | #include <ATen/ops/sparse_sampled_addmm_native.h> |
116 | #include <ATen/ops/sqrt_native.h> |
117 | #include <ATen/ops/sum_native.h> |
118 | #include <ATen/ops/tan_native.h> |
119 | #include <ATen/ops/tanh_native.h> |
120 | #include <ATen/ops/threshold_backward_native.h> |
121 | #include <ATen/ops/to_sparse_bsc_native.h> |
122 | #include <ATen/ops/to_sparse_bsr_native.h> |
123 | #include <ATen/ops/to_sparse_csc_native.h> |
124 | #include <ATen/ops/to_sparse_csr_native.h> |
125 | #include <ATen/ops/to_sparse_native.h> |
126 | #include <ATen/ops/triangular_solve_native.h> |
127 | #include <ATen/ops/trunc_native.h> |
128 | #include <ATen/ops/values_native.h> |
129 | #include <ATen/ops/zero_native.h> |
130 | |
131 | // See template file RegisterDispatchDefinitions.ini |
132 | namespace at { |
133 | // NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid |
134 | // ambiguity with conflicting identifiers that may have been defined in |
135 | // at namespace already. |
136 | namespace { |
137 | void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) { |
138 | TORCH_CHECK(options.dtype() == out.dtype(), |
139 | "Expected out tensor to have dtype " , options.dtype(), ", but got " , out.dtype(), " instead" ); |
140 | TORCH_CHECK(options.device() == out.device(), |
141 | "Expected out tensor to have device " , options.device(), ", but got " , out.device(), " instead" ); |
142 | const bool resized = at::native::resize_output(out, sizes); |
143 | // Only restride if a resize occurred; otherwise we ignore the (advisory) |
144 | // strides from the meta function and directly use the output tensor's |
145 | // preexisting strides |
146 | if (resized) { |
147 | if (!strides.empty()) { |
148 | TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value()); |
149 | // TODO: avoid the redispatch here |
150 | out.as_strided_(sizes, strides); |
151 | } else if (options.memory_format_opt().has_value()) { |
152 | out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt()); |
153 | } |
154 | } |
155 | } |
156 | void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) { |
157 | // These checks are needed on those operators that: |
158 | // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm') |
159 | // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod') |
160 | // For other operators (e.g. 'add'), 'TensorIterator' already checks |
161 | // these things separately. |
162 | TORCH_CHECK(options.dtype() == self.dtype(), |
163 | "Bad in-place call: " , |
164 | "input tensor dtype " , self.dtype(), " and output tensor dtype " , options.dtype(), " should match" ); |
165 | TORCH_CHECK(options.device() == self.device(), |
166 | "Bad in-place call: " , |
167 | "input tensor device " , self.device(), " and output tensor device " , options.device(), " should match" ); |
168 | TORCH_CHECK(sizes == self.sizes(), |
169 | "Bad in-place call: " , |
170 | "input tensor size " , self.sizes(), " and output tensor size " , sizes, " should match" ); |
171 | } |
172 | namespace { |
173 | at::Tensor wrapper_SparseCsrCUDA__abs(const at::Tensor & self) { |
174 | // No device check |
175 | const OptionalDeviceGuard device_guard(device_of(self)); |
176 | return at::native::abs_sparse_csr(self); |
177 | } |
178 | } // anonymous namespace |
179 | namespace { |
180 | at::Tensor & wrapper_SparseCsrCUDA_out_abs_out(const at::Tensor & self, at::Tensor & out) { |
181 | // No device check |
182 | const OptionalDeviceGuard device_guard(device_of(self)); |
183 | return at::native::abs_sparse_csr_out(self, out); |
184 | } |
185 | } // anonymous namespace |
186 | namespace { |
187 | at::Tensor & wrapper_SparseCsrCUDA__abs_(at::Tensor & self) { |
188 | // No device check |
189 | const OptionalDeviceGuard device_guard(device_of(self)); |
190 | return at::native::abs_sparse_csr_(self); |
191 | } |
192 | } // anonymous namespace |
193 | namespace { |
194 | at::Tensor wrapper_SparseCsrCUDA__angle(const at::Tensor & self) { |
195 | // No device check |
196 | const OptionalDeviceGuard device_guard(device_of(self)); |
197 | return at::native::angle_sparse_csr(self); |
198 | } |
199 | } // anonymous namespace |
200 | namespace { |
201 | at::Tensor & wrapper_SparseCsrCUDA_out_angle_out(const at::Tensor & self, at::Tensor & out) { |
202 | // No device check |
203 | const OptionalDeviceGuard device_guard(device_of(self)); |
204 | return at::native::angle_sparse_csr_out(self, out); |
205 | } |
206 | } // anonymous namespace |
207 | namespace { |
208 | at::Tensor wrapper_SparseCsrCUDA__sgn(const at::Tensor & self) { |
209 | c10::optional<Device> common_device = nullopt; |
210 | (void)common_device; // Suppress unused variable warning |
211 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__sgn" , "self" ); |
212 | const OptionalDeviceGuard device_guard(device_of(self)); |
213 | return at::native::sgn_sparse_csr(self); |
214 | } |
215 | } // anonymous namespace |
216 | namespace { |
217 | at::Tensor & wrapper_SparseCsrCUDA_out_sgn_out(const at::Tensor & self, at::Tensor & out) { |
218 | c10::optional<Device> common_device = nullopt; |
219 | (void)common_device; // Suppress unused variable warning |
220 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_sgn_out" , "out" ); |
221 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_sgn_out" , "self" ); |
222 | const OptionalDeviceGuard device_guard(device_of(self)); |
223 | return at::native::sgn_sparse_csr_out(self, out); |
224 | } |
225 | } // anonymous namespace |
226 | namespace { |
227 | at::Tensor & wrapper_SparseCsrCUDA__sgn_(at::Tensor & self) { |
228 | c10::optional<Device> common_device = nullopt; |
229 | (void)common_device; // Suppress unused variable warning |
230 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__sgn_" , "self" ); |
231 | const OptionalDeviceGuard device_guard(device_of(self)); |
232 | return at::native::sgn_sparse_csr_(self); |
233 | } |
234 | } // anonymous namespace |
235 | namespace { |
236 | at::Tensor wrapper_SparseCsrCUDA___conj_physical(const at::Tensor & self) { |
237 | c10::optional<Device> common_device = nullopt; |
238 | (void)common_device; // Suppress unused variable warning |
239 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA___conj_physical" , "self" ); |
240 | const OptionalDeviceGuard device_guard(device_of(self)); |
241 | return at::native::conj_physical_sparse_csr(self); |
242 | } |
243 | } // anonymous namespace |
244 | namespace { |
245 | at::Tensor & wrapper_SparseCsrCUDA_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) { |
246 | c10::optional<Device> common_device = nullopt; |
247 | (void)common_device; // Suppress unused variable warning |
248 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_conj_physical_out" , "out" ); |
249 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_conj_physical_out" , "self" ); |
250 | const OptionalDeviceGuard device_guard(device_of(self)); |
251 | return at::native::conj_physical_sparse_csr_out(self, out); |
252 | } |
253 | } // anonymous namespace |
254 | namespace { |
255 | at::Tensor & wrapper_SparseCsrCUDA__conj_physical_(at::Tensor & self) { |
256 | c10::optional<Device> common_device = nullopt; |
257 | (void)common_device; // Suppress unused variable warning |
258 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__conj_physical_" , "self" ); |
259 | const OptionalDeviceGuard device_guard(device_of(self)); |
260 | return at::native::conj_physical_sparse_csr_(self); |
261 | } |
262 | } // anonymous namespace |
263 | namespace { |
264 | at::Tensor wrapper_SparseCsrCUDA_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
265 | // No device check |
266 | const OptionalDeviceGuard device_guard(device_of(self)); |
267 | return at::native::add_sparse_csr(self, other, alpha); |
268 | } |
269 | } // anonymous namespace |
270 | namespace { |
271 | at::Tensor & wrapper_SparseCsrCUDA_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
272 | // No device check |
273 | const OptionalDeviceGuard device_guard(device_of(self)); |
274 | return at::native::add_out_sparse_csr_cuda(self, other, alpha, out); |
275 | } |
276 | } // anonymous namespace |
277 | namespace { |
278 | at::Tensor & wrapper_SparseCsrCUDA_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
279 | // No device check |
280 | const OptionalDeviceGuard device_guard(device_of(self)); |
281 | return at::native::add_sparse_csr_(self, other, alpha); |
282 | } |
283 | } // anonymous namespace |
284 | namespace { |
285 | at::Tensor & wrapper_SparseCsrCUDA_out_addmv_out(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
286 | c10::optional<Device> common_device = nullopt; |
287 | (void)common_device; // Suppress unused variable warning |
288 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_addmv_out" , "out" ); |
289 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_addmv_out" , "self" ); |
290 | c10::impl::check_and_update_common_device(common_device, mat, "wrapper_SparseCsrCUDA_out_addmv_out" , "mat" ); |
291 | c10::impl::check_and_update_common_device(common_device, vec, "wrapper_SparseCsrCUDA_out_addmv_out" , "vec" ); |
292 | const OptionalDeviceGuard device_guard(device_of(self)); |
293 | return at::native::addmv_out_sparse_compressed_cuda(self, mat, vec, beta, alpha, out); |
294 | } |
295 | } // anonymous namespace |
296 | namespace { |
297 | at::Tensor wrapper_SparseCsrCUDA__asinh(const at::Tensor & self) { |
298 | c10::optional<Device> common_device = nullopt; |
299 | (void)common_device; // Suppress unused variable warning |
300 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__asinh" , "self" ); |
301 | const OptionalDeviceGuard device_guard(device_of(self)); |
302 | return at::native::asinh_sparse_csr(self); |
303 | } |
304 | } // anonymous namespace |
305 | namespace { |
306 | at::Tensor & wrapper_SparseCsrCUDA_out_asinh_out(const at::Tensor & self, at::Tensor & out) { |
307 | c10::optional<Device> common_device = nullopt; |
308 | (void)common_device; // Suppress unused variable warning |
309 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_asinh_out" , "out" ); |
310 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_asinh_out" , "self" ); |
311 | const OptionalDeviceGuard device_guard(device_of(self)); |
312 | return at::native::asinh_sparse_csr_out(self, out); |
313 | } |
314 | } // anonymous namespace |
315 | namespace { |
316 | at::Tensor & wrapper_SparseCsrCUDA__asinh_(at::Tensor & self) { |
317 | c10::optional<Device> common_device = nullopt; |
318 | (void)common_device; // Suppress unused variable warning |
319 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__asinh_" , "self" ); |
320 | const OptionalDeviceGuard device_guard(device_of(self)); |
321 | return at::native::asinh_sparse_csr_(self); |
322 | } |
323 | } // anonymous namespace |
324 | namespace { |
325 | at::Tensor wrapper_SparseCsrCUDA__atanh(const at::Tensor & self) { |
326 | c10::optional<Device> common_device = nullopt; |
327 | (void)common_device; // Suppress unused variable warning |
328 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__atanh" , "self" ); |
329 | const OptionalDeviceGuard device_guard(device_of(self)); |
330 | return at::native::atanh_sparse_csr(self); |
331 | } |
332 | } // anonymous namespace |
333 | namespace { |
334 | at::Tensor & wrapper_SparseCsrCUDA_out_atanh_out(const at::Tensor & self, at::Tensor & out) { |
335 | c10::optional<Device> common_device = nullopt; |
336 | (void)common_device; // Suppress unused variable warning |
337 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_atanh_out" , "out" ); |
338 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_atanh_out" , "self" ); |
339 | const OptionalDeviceGuard device_guard(device_of(self)); |
340 | return at::native::atanh_sparse_csr_out(self, out); |
341 | } |
342 | } // anonymous namespace |
343 | namespace { |
344 | at::Tensor & wrapper_SparseCsrCUDA__atanh_(at::Tensor & self) { |
345 | c10::optional<Device> common_device = nullopt; |
346 | (void)common_device; // Suppress unused variable warning |
347 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__atanh_" , "self" ); |
348 | const OptionalDeviceGuard device_guard(device_of(self)); |
349 | return at::native::atanh_sparse_csr_(self); |
350 | } |
351 | } // anonymous namespace |
352 | namespace { |
353 | at::Tensor wrapper_SparseCsrCUDA__asin(const at::Tensor & self) { |
354 | // No device check |
355 | const OptionalDeviceGuard device_guard(device_of(self)); |
356 | return at::native::asin_sparse_csr(self); |
357 | } |
358 | } // anonymous namespace |
359 | namespace { |
360 | at::Tensor & wrapper_SparseCsrCUDA_out_asin_out(const at::Tensor & self, at::Tensor & out) { |
361 | // No device check |
362 | const OptionalDeviceGuard device_guard(device_of(self)); |
363 | return at::native::asin_sparse_csr_out(self, out); |
364 | } |
365 | } // anonymous namespace |
366 | namespace { |
367 | at::Tensor & wrapper_SparseCsrCUDA__asin_(at::Tensor & self) { |
368 | // No device check |
369 | const OptionalDeviceGuard device_guard(device_of(self)); |
370 | return at::native::asin_sparse_csr_(self); |
371 | } |
372 | } // anonymous namespace |
373 | namespace { |
374 | at::Tensor wrapper_SparseCsrCUDA__atan(const at::Tensor & self) { |
375 | // No device check |
376 | const OptionalDeviceGuard device_guard(device_of(self)); |
377 | return at::native::atan_sparse_csr(self); |
378 | } |
379 | } // anonymous namespace |
380 | namespace { |
381 | at::Tensor & wrapper_SparseCsrCUDA_out_atan_out(const at::Tensor & self, at::Tensor & out) { |
382 | // No device check |
383 | const OptionalDeviceGuard device_guard(device_of(self)); |
384 | return at::native::atan_sparse_csr_out(self, out); |
385 | } |
386 | } // anonymous namespace |
387 | namespace { |
388 | at::Tensor & wrapper_SparseCsrCUDA__atan_(at::Tensor & self) { |
389 | // No device check |
390 | const OptionalDeviceGuard device_guard(device_of(self)); |
391 | return at::native::atan_sparse_csr_(self); |
392 | } |
393 | } // anonymous namespace |
394 | namespace { |
395 | at::Tensor & wrapper_SparseCsrCUDA_out_baddbmm_out(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
396 | c10::optional<Device> common_device = nullopt; |
397 | (void)common_device; // Suppress unused variable warning |
398 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_baddbmm_out" , "out" ); |
399 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_baddbmm_out" , "self" ); |
400 | c10::impl::check_and_update_common_device(common_device, batch1, "wrapper_SparseCsrCUDA_out_baddbmm_out" , "batch1" ); |
401 | c10::impl::check_and_update_common_device(common_device, batch2, "wrapper_SparseCsrCUDA_out_baddbmm_out" , "batch2" ); |
402 | const OptionalDeviceGuard device_guard(device_of(self)); |
403 | return at::native::baddbmm_out_sparse_csr_cuda(self, batch1, batch2, beta, alpha, out); |
404 | } |
405 | } // anonymous namespace |
406 | namespace { |
407 | at::Tensor & wrapper_SparseCsrCUDA_out_bmm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
408 | c10::optional<Device> common_device = nullopt; |
409 | (void)common_device; // Suppress unused variable warning |
410 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_bmm_out" , "out" ); |
411 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_bmm_out" , "self" ); |
412 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA_out_bmm_out" , "mat2" ); |
413 | const OptionalDeviceGuard device_guard(device_of(self)); |
414 | return at::native::bmm_out_sparse_csr_cuda(self, mat2, out); |
415 | } |
416 | } // anonymous namespace |
417 | namespace { |
418 | at::Tensor wrapper_SparseCsrCUDA__ceil(const at::Tensor & self) { |
419 | // No device check |
420 | const OptionalDeviceGuard device_guard(device_of(self)); |
421 | return at::native::ceil_sparse_csr(self); |
422 | } |
423 | } // anonymous namespace |
424 | namespace { |
425 | at::Tensor & wrapper_SparseCsrCUDA_out_ceil_out(const at::Tensor & self, at::Tensor & out) { |
426 | // No device check |
427 | const OptionalDeviceGuard device_guard(device_of(self)); |
428 | return at::native::ceil_sparse_csr_out(self, out); |
429 | } |
430 | } // anonymous namespace |
431 | namespace { |
432 | at::Tensor & wrapper_SparseCsrCUDA__ceil_(at::Tensor & self) { |
433 | // No device check |
434 | const OptionalDeviceGuard device_guard(device_of(self)); |
435 | return at::native::ceil_sparse_csr_(self); |
436 | } |
437 | } // anonymous namespace |
438 | namespace { |
439 | at::Tensor & wrapper_SparseCsrCUDA__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) { |
440 | // No device check |
441 | // DeviceGuard omitted |
442 | return at::native::copy_sparse_compressed_(self, src, non_blocking); |
443 | } |
444 | } // anonymous namespace |
445 | namespace { |
446 | at::Tensor wrapper_SparseCsrCUDA_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
447 | c10::optional<Device> common_device = nullopt; |
448 | (void)common_device; // Suppress unused variable warning |
449 | globalContext().lazyInitCUDA(); |
450 | const DeviceGuard device_guard(device_or_default(device)); |
451 | return at::native::empty_sparse_compressed(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format); |
452 | } |
453 | } // anonymous namespace |
454 | namespace { |
455 | const at::Tensor & wrapper_SparseCsrCUDA__resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) { |
456 | // No device check |
457 | // DeviceGuard omitted |
458 | return at::native::resize_sparse_csr_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format); |
459 | } |
460 | } // anonymous namespace |
461 | namespace { |
462 | at::Tensor wrapper_SparseCsrCUDA__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
463 | // No device check |
464 | // DeviceGuard omitted |
465 | return at::native::empty_like_sparse_csr(self, dtype, layout, device, pin_memory, memory_format); |
466 | } |
467 | } // anonymous namespace |
468 | namespace { |
469 | at::Tensor wrapper_SparseCsrCUDA__erf(const at::Tensor & self) { |
470 | // No device check |
471 | const OptionalDeviceGuard device_guard(device_of(self)); |
472 | return at::native::erf_sparse_csr(self); |
473 | } |
474 | } // anonymous namespace |
475 | namespace { |
476 | at::Tensor & wrapper_SparseCsrCUDA_out_erf_out(const at::Tensor & self, at::Tensor & out) { |
477 | // No device check |
478 | const OptionalDeviceGuard device_guard(device_of(self)); |
479 | return at::native::erf_sparse_csr_out(self, out); |
480 | } |
481 | } // anonymous namespace |
482 | namespace { |
483 | at::Tensor & wrapper_SparseCsrCUDA__erf_(at::Tensor & self) { |
484 | // No device check |
485 | const OptionalDeviceGuard device_guard(device_of(self)); |
486 | return at::native::erf_sparse_csr_(self); |
487 | } |
488 | } // anonymous namespace |
489 | namespace { |
490 | at::Tensor wrapper_SparseCsrCUDA__expm1(const at::Tensor & self) { |
491 | // No device check |
492 | const OptionalDeviceGuard device_guard(device_of(self)); |
493 | return at::native::expm1_sparse_csr(self); |
494 | } |
495 | } // anonymous namespace |
496 | namespace { |
497 | at::Tensor & wrapper_SparseCsrCUDA_out_expm1_out(const at::Tensor & self, at::Tensor & out) { |
498 | // No device check |
499 | const OptionalDeviceGuard device_guard(device_of(self)); |
500 | return at::native::expm1_sparse_csr_out(self, out); |
501 | } |
502 | } // anonymous namespace |
503 | namespace { |
504 | at::Tensor & wrapper_SparseCsrCUDA__expm1_(at::Tensor & self) { |
505 | // No device check |
506 | const OptionalDeviceGuard device_guard(device_of(self)); |
507 | return at::native::expm1_sparse_csr_(self); |
508 | } |
509 | } // anonymous namespace |
510 | namespace { |
511 | at::Tensor & wrapper_SparseCsrCUDA_Scalar_fill_(at::Tensor & self, const at::Scalar & value) { |
512 | // No device check |
513 | const OptionalDeviceGuard device_guard(device_of(self)); |
514 | return at::native::fill_sparse_csr_(self, value); |
515 | } |
516 | } // anonymous namespace |
517 | namespace { |
518 | at::Tensor wrapper_SparseCsrCUDA__floor(const at::Tensor & self) { |
519 | // No device check |
520 | const OptionalDeviceGuard device_guard(device_of(self)); |
521 | return at::native::floor_sparse_csr(self); |
522 | } |
523 | } // anonymous namespace |
524 | namespace { |
525 | at::Tensor & wrapper_SparseCsrCUDA_out_floor_out(const at::Tensor & self, at::Tensor & out) { |
526 | // No device check |
527 | const OptionalDeviceGuard device_guard(device_of(self)); |
528 | return at::native::floor_sparse_csr_out(self, out); |
529 | } |
530 | } // anonymous namespace |
531 | namespace { |
532 | at::Tensor & wrapper_SparseCsrCUDA__floor_(at::Tensor & self) { |
533 | // No device check |
534 | const OptionalDeviceGuard device_guard(device_of(self)); |
535 | return at::native::floor_sparse_csr_(self); |
536 | } |
537 | } // anonymous namespace |
538 | namespace { |
539 | at::Tensor wrapper_SparseCsrCUDA__frac(const at::Tensor & self) { |
540 | // No device check |
541 | const OptionalDeviceGuard device_guard(device_of(self)); |
542 | return at::native::frac_sparse_csr(self); |
543 | } |
544 | } // anonymous namespace |
545 | namespace { |
546 | at::Tensor & wrapper_SparseCsrCUDA_out_frac_out(const at::Tensor & self, at::Tensor & out) { |
547 | // No device check |
548 | const OptionalDeviceGuard device_guard(device_of(self)); |
549 | return at::native::frac_sparse_csr_out(self, out); |
550 | } |
551 | } // anonymous namespace |
552 | namespace { |
553 | at::Tensor & wrapper_SparseCsrCUDA__frac_(at::Tensor & self) { |
554 | // No device check |
555 | const OptionalDeviceGuard device_guard(device_of(self)); |
556 | return at::native::frac_sparse_csr_(self); |
557 | } |
558 | } // anonymous namespace |
559 | namespace { |
560 | at::Tensor wrapper_SparseCsrCUDA__isnan(const at::Tensor & self) { |
561 | // No device check |
562 | // DeviceGuard omitted |
563 | return at::native::isnan_sparse_csr(self); |
564 | } |
565 | } // anonymous namespace |
566 | namespace { |
567 | at::Tensor wrapper_SparseCsrCUDA__log1p(const at::Tensor & self) { |
568 | // No device check |
569 | const OptionalDeviceGuard device_guard(device_of(self)); |
570 | return at::native::log1p_sparse_csr(self); |
571 | } |
572 | } // anonymous namespace |
573 | namespace { |
574 | at::Tensor & wrapper_SparseCsrCUDA_out_log1p_out(const at::Tensor & self, at::Tensor & out) { |
575 | // No device check |
576 | const OptionalDeviceGuard device_guard(device_of(self)); |
577 | return at::native::log1p_sparse_csr_out(self, out); |
578 | } |
579 | } // anonymous namespace |
580 | namespace { |
581 | at::Tensor & wrapper_SparseCsrCUDA__log1p_(at::Tensor & self) { |
582 | // No device check |
583 | const OptionalDeviceGuard device_guard(device_of(self)); |
584 | return at::native::log1p_sparse_csr_(self); |
585 | } |
586 | } // anonymous namespace |
587 | namespace { |
588 | at::Tensor wrapper_SparseCsrCUDA__mm(const at::Tensor & self, const at::Tensor & mat2) { |
589 | c10::optional<Device> common_device = nullopt; |
590 | (void)common_device; // Suppress unused variable warning |
591 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__mm" , "self" ); |
592 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA__mm" , "mat2" ); |
593 | const OptionalDeviceGuard device_guard(device_of(self)); |
594 | return at::native::_sparse_csr_mm(self, mat2); |
595 | } |
596 | } // anonymous namespace |
597 | namespace { |
598 | at::Tensor & wrapper_SparseCsrCUDA_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
599 | c10::optional<Device> common_device = nullopt; |
600 | (void)common_device; // Suppress unused variable warning |
601 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_mm_out" , "out" ); |
602 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_mm_out" , "self" ); |
603 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA_out_mm_out" , "mat2" ); |
604 | const OptionalDeviceGuard device_guard(device_of(self)); |
605 | return at::native::_sparse_csr_mm_out(self, mat2, out); |
606 | } |
607 | } // anonymous namespace |
608 | namespace { |
609 | at::Tensor wrapper_SparseCsrCUDA_Tensor_mul(const at::Tensor & self, const at::Tensor & other) { |
610 | // No device check |
611 | const OptionalDeviceGuard device_guard(device_of(self)); |
612 | return at::native::mul_sparse_csr(self, other); |
613 | } |
614 | } // anonymous namespace |
615 | namespace { |
616 | at::Tensor & wrapper_SparseCsrCUDA_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
617 | // No device check |
618 | const OptionalDeviceGuard device_guard(device_of(self)); |
619 | return at::native::mul_out_sparse_csr(self, other, out); |
620 | } |
621 | } // anonymous namespace |
622 | namespace { |
623 | at::Tensor & wrapper_SparseCsrCUDA_Tensor_mul_(at::Tensor & self, const at::Tensor & other) { |
624 | // No device check |
625 | const OptionalDeviceGuard device_guard(device_of(self)); |
626 | return at::native::mul_sparse_csr_(self, other); |
627 | } |
628 | } // anonymous namespace |
629 | namespace { |
630 | at::Tensor wrapper_SparseCsrCUDA_Scalar_mul(const at::Tensor & self, const at::Scalar & other) { |
631 | // No device check |
632 | const OptionalDeviceGuard device_guard(device_of(self)); |
633 | return at::native::mul_scalar_sparse_csr(self, other); |
634 | } |
635 | } // anonymous namespace |
636 | namespace { |
637 | at::Tensor & wrapper_SparseCsrCUDA_Scalar_mul_(at::Tensor & self, const at::Scalar & other) { |
638 | // No device check |
639 | const OptionalDeviceGuard device_guard(device_of(self)); |
640 | return at::native::mul__scalar_sparse_csr(self, other); |
641 | } |
642 | } // anonymous namespace |
643 | namespace { |
644 | at::Tensor wrapper_SparseCsrCUDA__rad2deg(const at::Tensor & self) { |
645 | c10::optional<Device> common_device = nullopt; |
646 | (void)common_device; // Suppress unused variable warning |
647 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__rad2deg" , "self" ); |
648 | const OptionalDeviceGuard device_guard(device_of(self)); |
649 | return at::native::rad2deg_sparse_csr(self); |
650 | } |
651 | } // anonymous namespace |
652 | namespace { |
653 | at::Tensor & wrapper_SparseCsrCUDA_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) { |
654 | c10::optional<Device> common_device = nullopt; |
655 | (void)common_device; // Suppress unused variable warning |
656 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_rad2deg_out" , "out" ); |
657 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_rad2deg_out" , "self" ); |
658 | const OptionalDeviceGuard device_guard(device_of(self)); |
659 | return at::native::rad2deg_sparse_csr_out(self, out); |
660 | } |
661 | } // anonymous namespace |
662 | namespace { |
663 | at::Tensor & wrapper_SparseCsrCUDA__rad2deg_(at::Tensor & self) { |
664 | c10::optional<Device> common_device = nullopt; |
665 | (void)common_device; // Suppress unused variable warning |
666 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__rad2deg_" , "self" ); |
667 | const OptionalDeviceGuard device_guard(device_of(self)); |
668 | return at::native::rad2deg_sparse_csr_(self); |
669 | } |
670 | } // anonymous namespace |
671 | namespace { |
672 | at::Tensor wrapper_SparseCsrCUDA__deg2rad(const at::Tensor & self) { |
673 | c10::optional<Device> common_device = nullopt; |
674 | (void)common_device; // Suppress unused variable warning |
675 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__deg2rad" , "self" ); |
676 | const OptionalDeviceGuard device_guard(device_of(self)); |
677 | return at::native::deg2rad_sparse_csr(self); |
678 | } |
679 | } // anonymous namespace |
680 | namespace { |
681 | at::Tensor & wrapper_SparseCsrCUDA_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) { |
682 | c10::optional<Device> common_device = nullopt; |
683 | (void)common_device; // Suppress unused variable warning |
684 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_deg2rad_out" , "out" ); |
685 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_deg2rad_out" , "self" ); |
686 | const OptionalDeviceGuard device_guard(device_of(self)); |
687 | return at::native::deg2rad_sparse_csr_out(self, out); |
688 | } |
689 | } // anonymous namespace |
690 | namespace { |
691 | at::Tensor & wrapper_SparseCsrCUDA__deg2rad_(at::Tensor & self) { |
692 | c10::optional<Device> common_device = nullopt; |
693 | (void)common_device; // Suppress unused variable warning |
694 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__deg2rad_" , "self" ); |
695 | const OptionalDeviceGuard device_guard(device_of(self)); |
696 | return at::native::deg2rad_sparse_csr_(self); |
697 | } |
698 | } // anonymous namespace |
699 | namespace { |
700 | at::Tensor wrapper_SparseCsrCUDA__neg(const at::Tensor & self) { |
701 | // No device check |
702 | const OptionalDeviceGuard device_guard(device_of(self)); |
703 | return at::native::neg_sparse_csr(self); |
704 | } |
705 | } // anonymous namespace |
706 | namespace { |
707 | at::Tensor & wrapper_SparseCsrCUDA_out_neg_out(const at::Tensor & self, at::Tensor & out) { |
708 | // No device check |
709 | const OptionalDeviceGuard device_guard(device_of(self)); |
710 | return at::native::neg_sparse_csr_out(self, out); |
711 | } |
712 | } // anonymous namespace |
713 | namespace { |
714 | at::Tensor & wrapper_SparseCsrCUDA__neg_(at::Tensor & self) { |
715 | // No device check |
716 | const OptionalDeviceGuard device_guard(device_of(self)); |
717 | return at::native::neg_sparse_csr_(self); |
718 | } |
719 | } // anonymous namespace |
720 | namespace { |
721 | at::Tensor wrapper_SparseCsrCUDA__round(const at::Tensor & self) { |
722 | // No device check |
723 | const OptionalDeviceGuard device_guard(device_of(self)); |
724 | return at::native::round_sparse_csr(self); |
725 | } |
726 | } // anonymous namespace |
727 | namespace { |
728 | at::Tensor & wrapper_SparseCsrCUDA_out_round_out(const at::Tensor & self, at::Tensor & out) { |
729 | // No device check |
730 | const OptionalDeviceGuard device_guard(device_of(self)); |
731 | return at::native::round_sparse_csr_out(self, out); |
732 | } |
733 | } // anonymous namespace |
734 | namespace { |
735 | at::Tensor & wrapper_SparseCsrCUDA__round_(at::Tensor & self) { |
736 | // No device check |
737 | const OptionalDeviceGuard device_guard(device_of(self)); |
738 | return at::native::round_sparse_csr_(self); |
739 | } |
740 | } // anonymous namespace |
741 | namespace { |
742 | at::Tensor wrapper_SparseCsrCUDA__relu(const at::Tensor & self) { |
743 | // No device check |
744 | const OptionalDeviceGuard device_guard(device_of(self)); |
745 | return at::native::relu_sparse_csr(self); |
746 | } |
747 | } // anonymous namespace |
748 | namespace { |
749 | at::Tensor & wrapper_SparseCsrCUDA__relu_(at::Tensor & self) { |
750 | // No device check |
751 | const OptionalDeviceGuard device_guard(device_of(self)); |
752 | return at::native::relu_sparse_csr_(self); |
753 | } |
754 | } // anonymous namespace |
755 | namespace { |
756 | at::Tensor wrapper_SparseCsrCUDA_int_select(const at::Tensor & self, int64_t dim, c10::SymInt index) { |
757 | // No device check |
758 | // DeviceGuard omitted |
759 | return at::native::select_sparse_csr(self, dim, index.expect_int()); |
760 | } |
761 | } // anonymous namespace |
762 | namespace { |
763 | at::Tensor wrapper_SparseCsrCUDA__sin(const at::Tensor & self) { |
764 | // No device check |
765 | const OptionalDeviceGuard device_guard(device_of(self)); |
766 | return at::native::sin_sparse_csr(self); |
767 | } |
768 | } // anonymous namespace |
769 | namespace { |
770 | at::Tensor & wrapper_SparseCsrCUDA_out_sin_out(const at::Tensor & self, at::Tensor & out) { |
771 | // No device check |
772 | const OptionalDeviceGuard device_guard(device_of(self)); |
773 | return at::native::sin_sparse_csr_out(self, out); |
774 | } |
775 | } // anonymous namespace |
776 | namespace { |
777 | at::Tensor & wrapper_SparseCsrCUDA__sin_(at::Tensor & self) { |
778 | // No device check |
779 | const OptionalDeviceGuard device_guard(device_of(self)); |
780 | return at::native::sin_sparse_csr_(self); |
781 | } |
782 | } // anonymous namespace |
783 | namespace { |
784 | at::Tensor wrapper_SparseCsrCUDA__sinh(const at::Tensor & self) { |
785 | // No device check |
786 | const OptionalDeviceGuard device_guard(device_of(self)); |
787 | return at::native::sinh_sparse_csr(self); |
788 | } |
789 | } // anonymous namespace |
790 | namespace { |
791 | at::Tensor & wrapper_SparseCsrCUDA_out_sinh_out(const at::Tensor & self, at::Tensor & out) { |
792 | // No device check |
793 | const OptionalDeviceGuard device_guard(device_of(self)); |
794 | return at::native::sinh_sparse_csr_out(self, out); |
795 | } |
796 | } // anonymous namespace |
797 | namespace { |
798 | at::Tensor & wrapper_SparseCsrCUDA__sinh_(at::Tensor & self) { |
799 | // No device check |
800 | const OptionalDeviceGuard device_guard(device_of(self)); |
801 | return at::native::sinh_sparse_csr_(self); |
802 | } |
803 | } // anonymous namespace |
804 | namespace { |
805 | at::Tensor wrapper_SparseCsrCUDA__sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) { |
806 | // No device check |
807 | const OptionalDeviceGuard device_guard(device_of(self)); |
808 | return at::native::sum_csr(self, dtype); |
809 | } |
810 | } // anonymous namespace |
811 | namespace { |
812 | at::Tensor wrapper_SparseCsrCUDA__sqrt(const at::Tensor & self) { |
813 | // No device check |
814 | const OptionalDeviceGuard device_guard(device_of(self)); |
815 | return at::native::sqrt_sparse_csr(self); |
816 | } |
817 | } // anonymous namespace |
818 | namespace { |
819 | at::Tensor & wrapper_SparseCsrCUDA_out_sqrt_out(const at::Tensor & self, at::Tensor & out) { |
820 | // No device check |
821 | const OptionalDeviceGuard device_guard(device_of(self)); |
822 | return at::native::sqrt_sparse_csr_out(self, out); |
823 | } |
824 | } // anonymous namespace |
825 | namespace { |
826 | at::Tensor & wrapper_SparseCsrCUDA__sqrt_(at::Tensor & self) { |
827 | // No device check |
828 | const OptionalDeviceGuard device_guard(device_of(self)); |
829 | return at::native::sqrt_sparse_csr_(self); |
830 | } |
831 | } // anonymous namespace |
832 | namespace { |
833 | at::Tensor wrapper_SparseCsrCUDA__tan(const at::Tensor & self) { |
834 | // No device check |
835 | const OptionalDeviceGuard device_guard(device_of(self)); |
836 | return at::native::tan_sparse_csr(self); |
837 | } |
838 | } // anonymous namespace |
839 | namespace { |
840 | at::Tensor & wrapper_SparseCsrCUDA_out_tan_out(const at::Tensor & self, at::Tensor & out) { |
841 | // No device check |
842 | const OptionalDeviceGuard device_guard(device_of(self)); |
843 | return at::native::tan_sparse_csr_out(self, out); |
844 | } |
845 | } // anonymous namespace |
846 | namespace { |
847 | at::Tensor & wrapper_SparseCsrCUDA__tan_(at::Tensor & self) { |
848 | // No device check |
849 | const OptionalDeviceGuard device_guard(device_of(self)); |
850 | return at::native::tan_sparse_csr_(self); |
851 | } |
852 | } // anonymous namespace |
853 | namespace { |
854 | at::Tensor wrapper_SparseCsrCUDA__tanh(const at::Tensor & self) { |
855 | // No device check |
856 | const OptionalDeviceGuard device_guard(device_of(self)); |
857 | return at::native::tanh_sparse_csr(self); |
858 | } |
859 | } // anonymous namespace |
860 | namespace { |
861 | at::Tensor & wrapper_SparseCsrCUDA_out_tanh_out(const at::Tensor & self, at::Tensor & out) { |
862 | // No device check |
863 | const OptionalDeviceGuard device_guard(device_of(self)); |
864 | return at::native::tanh_sparse_csr_out(self, out); |
865 | } |
866 | } // anonymous namespace |
867 | namespace { |
868 | at::Tensor & wrapper_SparseCsrCUDA__tanh_(at::Tensor & self) { |
869 | // No device check |
870 | const OptionalDeviceGuard device_guard(device_of(self)); |
871 | return at::native::tanh_sparse_csr_(self); |
872 | } |
873 | } // anonymous namespace |
874 | namespace { |
875 | at::Tensor wrapper_SparseCsrCUDA__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { |
876 | c10::optional<Device> common_device = nullopt; |
877 | (void)common_device; // Suppress unused variable warning |
878 | c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCsrCUDA__threshold_backward" , "grad_output" ); |
879 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__threshold_backward" , "self" ); |
880 | const OptionalDeviceGuard device_guard(device_of(self)); |
881 | return at::native::threshold_backward_sparse_compressed(grad_output, self, threshold); |
882 | } |
883 | } // anonymous namespace |
884 | namespace { |
885 | at::Tensor & wrapper_SparseCsrCUDA_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) { |
886 | c10::optional<Device> common_device = nullopt; |
887 | (void)common_device; // Suppress unused variable warning |
888 | c10::impl::check_and_update_common_device(common_device, grad_input, "wrapper_SparseCsrCUDA_grad_input_threshold_backward_out" , "grad_input" ); |
889 | c10::impl::check_and_update_common_device(common_device, grad_output, "wrapper_SparseCsrCUDA_grad_input_threshold_backward_out" , "grad_output" ); |
890 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_grad_input_threshold_backward_out" , "self" ); |
891 | const OptionalDeviceGuard device_guard(device_of(self)); |
892 | return at::native::threshold_backward_sparse_compressed_out(grad_output, self, threshold, grad_input); |
893 | } |
894 | } // anonymous namespace |
895 | namespace { |
896 | at::Tensor wrapper_SparseCsrCUDA__trunc(const at::Tensor & self) { |
897 | // No device check |
898 | const OptionalDeviceGuard device_guard(device_of(self)); |
899 | return at::native::trunc_sparse_csr(self); |
900 | } |
901 | } // anonymous namespace |
902 | namespace { |
903 | at::Tensor & wrapper_SparseCsrCUDA_out_trunc_out(const at::Tensor & self, at::Tensor & out) { |
904 | // No device check |
905 | const OptionalDeviceGuard device_guard(device_of(self)); |
906 | return at::native::trunc_sparse_csr_out(self, out); |
907 | } |
908 | } // anonymous namespace |
909 | namespace { |
910 | at::Tensor & wrapper_SparseCsrCUDA__trunc_(at::Tensor & self) { |
911 | // No device check |
912 | const OptionalDeviceGuard device_guard(device_of(self)); |
913 | return at::native::trunc_sparse_csr_(self); |
914 | } |
915 | } // anonymous namespace |
916 | namespace { |
917 | at::Tensor wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
918 | c10::optional<Device> common_device = nullopt; |
919 | (void)common_device; // Suppress unused variable warning |
920 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_sum" , "self" ); |
921 | const OptionalDeviceGuard device_guard(device_of(self)); |
922 | return at::native::_sparse_csr_sum_cuda(self, dim, keepdim, dtype); |
923 | } |
924 | } // anonymous namespace |
925 | namespace { |
926 | at::Tensor wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
927 | c10::optional<Device> common_device = nullopt; |
928 | (void)common_device; // Suppress unused variable warning |
929 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_prod" , "self" ); |
930 | const OptionalDeviceGuard device_guard(device_of(self)); |
931 | return at::native::_sparse_csr_prod_cuda(self, dim, keepdim, dtype); |
932 | } |
933 | } // anonymous namespace |
934 | namespace { |
935 | at::Tensor wrapper_SparseCsrCUDA__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) { |
936 | c10::optional<Device> common_device = nullopt; |
937 | (void)common_device; // Suppress unused variable warning |
938 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__clone" , "self" ); |
939 | const OptionalDeviceGuard device_guard(device_of(self)); |
940 | return at::native::clone_sparse_compressed(self, memory_format); |
941 | } |
942 | } // anonymous namespace |
943 | namespace { |
944 | const at::Tensor & wrapper_SparseCsrCUDA__resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) { |
945 | c10::optional<Device> common_device = nullopt; |
946 | (void)common_device; // Suppress unused variable warning |
947 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__resize_as_sparse_" , "self" ); |
948 | c10::impl::check_and_update_common_device(common_device, the_template, "wrapper_SparseCsrCUDA__resize_as_sparse_" , "the_template" ); |
949 | const OptionalDeviceGuard device_guard(device_of(self)); |
950 | return at::native::resize_as_sparse_compressed_(self, the_template); |
951 | } |
952 | } // anonymous namespace |
953 | namespace { |
954 | at::Tensor & wrapper_SparseCsrCUDA__zero_(at::Tensor & self) { |
955 | // No device check |
956 | const OptionalDeviceGuard device_guard(device_of(self)); |
957 | return at::native::zero_sparse_csr_(self); |
958 | } |
959 | } // anonymous namespace |
960 | namespace { |
961 | at::Tensor wrapper_SparseCsrCUDA__sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
962 | c10::optional<Device> common_device = nullopt; |
963 | (void)common_device; // Suppress unused variable warning |
964 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__sparse_sampled_addmm" , "self" ); |
965 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCsrCUDA__sparse_sampled_addmm" , "mat1" ); |
966 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA__sparse_sampled_addmm" , "mat2" ); |
967 | const OptionalDeviceGuard device_guard(device_of(self)); |
968 | return at::native::sparse_sampled_addmm_sparse_csr_cuda(self, mat1, mat2, beta, alpha); |
969 | } |
970 | } // anonymous namespace |
971 | namespace { |
972 | at::Tensor & wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
973 | c10::optional<Device> common_device = nullopt; |
974 | (void)common_device; // Suppress unused variable warning |
975 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out" , "out" ); |
976 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out" , "self" ); |
977 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out" , "mat1" ); |
978 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out" , "mat2" ); |
979 | const OptionalDeviceGuard device_guard(device_of(self)); |
980 | return at::native::sparse_sampled_addmm_out_sparse_csr_cuda(self, mat1, mat2, beta, alpha, out); |
981 | } |
982 | } // anonymous namespace |
983 | namespace { |
984 | at::Tensor wrapper_SparseCsrCUDA__addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
985 | c10::optional<Device> common_device = nullopt; |
986 | (void)common_device; // Suppress unused variable warning |
987 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__addmm" , "self" ); |
988 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCsrCUDA__addmm" , "mat1" ); |
989 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA__addmm" , "mat2" ); |
990 | const OptionalDeviceGuard device_guard(device_of(self)); |
991 | return at::native::addmm_sparse_compressed_dense(self, mat1, mat2, beta, alpha); |
992 | } |
993 | } // anonymous namespace |
994 | namespace { |
995 | at::Tensor & wrapper_SparseCsrCUDA_out_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
996 | c10::optional<Device> common_device = nullopt; |
997 | (void)common_device; // Suppress unused variable warning |
998 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_addmm_out" , "out" ); |
999 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_addmm_out" , "self" ); |
1000 | c10::impl::check_and_update_common_device(common_device, mat1, "wrapper_SparseCsrCUDA_out_addmm_out" , "mat1" ); |
1001 | c10::impl::check_and_update_common_device(common_device, mat2, "wrapper_SparseCsrCUDA_out_addmm_out" , "mat2" ); |
1002 | const OptionalDeviceGuard device_guard(device_of(self)); |
1003 | return at::native::addmm_out_sparse_compressed_cuda(self, mat1, mat2, beta, alpha, out); |
1004 | } |
1005 | } // anonymous namespace |
1006 | namespace { |
1007 | at::Tensor wrapper_SparseCsrCUDA__sparse_mask(const at::Tensor & self, const at::Tensor & mask) { |
1008 | c10::optional<Device> common_device = nullopt; |
1009 | (void)common_device; // Suppress unused variable warning |
1010 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__sparse_mask" , "self" ); |
1011 | c10::impl::check_and_update_common_device(common_device, mask, "wrapper_SparseCsrCUDA__sparse_mask" , "mask" ); |
1012 | const OptionalDeviceGuard device_guard(device_of(self)); |
1013 | return at::native::sparse_mask_sparse_csr(self, mask); |
1014 | } |
1015 | } // anonymous namespace |
1016 | namespace { |
1017 | at::Tensor wrapper_SparseCsrCUDA___to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) { |
1018 | c10::optional<Device> common_device = nullopt; |
1019 | (void)common_device; // Suppress unused variable warning |
1020 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA___to_dense" , "self" ); |
1021 | const OptionalDeviceGuard device_guard(device_of(self)); |
1022 | return at::native::sparse_compressed_to_dense(self, dtype); |
1023 | } |
1024 | } // anonymous namespace |
1025 | namespace { |
1026 | int64_t wrapper_SparseCsrCUDA__sparse_dim(const at::Tensor & self) { |
1027 | // No device check |
1028 | // DeviceGuard omitted |
1029 | return at::native::sparse_dim_sparse_csr(self); |
1030 | } |
1031 | } // anonymous namespace |
1032 | namespace { |
1033 | int64_t wrapper_SparseCsrCUDA__dense_dim(const at::Tensor & self) { |
1034 | // No device check |
1035 | // DeviceGuard omitted |
1036 | return at::native::dense_dim_sparse_csr(self); |
1037 | } |
1038 | } // anonymous namespace |
1039 | namespace { |
1040 | int64_t wrapper_SparseCsrCUDA___nnz(const at::Tensor & self) { |
1041 | // No device check |
1042 | // DeviceGuard omitted |
1043 | return at::native::_nnz_sparse_csr(self); |
1044 | } |
1045 | } // anonymous namespace |
1046 | namespace { |
1047 | at::Tensor wrapper_SparseCsrCUDA__values(const at::Tensor & self) { |
1048 | // No device check |
1049 | // DeviceGuard omitted |
1050 | return at::native::values_sparse_csr(self); |
1051 | } |
1052 | } // anonymous namespace |
1053 | namespace { |
1054 | at::Tensor wrapper_SparseCsrCUDA__crow_indices(const at::Tensor & self) { |
1055 | // No device check |
1056 | // DeviceGuard omitted |
1057 | return at::native::crow_indices_sparse_csr(self); |
1058 | } |
1059 | } // anonymous namespace |
1060 | namespace { |
1061 | at::Tensor wrapper_SparseCsrCUDA__col_indices(const at::Tensor & self) { |
1062 | // No device check |
1063 | // DeviceGuard omitted |
1064 | return at::native::col_indices_sparse_csr(self); |
1065 | } |
1066 | } // anonymous namespace |
1067 | namespace { |
1068 | at::Tensor wrapper_SparseCsrCUDA__ccol_indices(const at::Tensor & self) { |
1069 | // No device check |
1070 | // DeviceGuard omitted |
1071 | return at::native::ccol_indices_sparse_csr(self); |
1072 | } |
1073 | } // anonymous namespace |
1074 | namespace { |
1075 | at::Tensor wrapper_SparseCsrCUDA__row_indices(const at::Tensor & self) { |
1076 | // No device check |
1077 | // DeviceGuard omitted |
1078 | return at::native::row_indices_sparse_csr(self); |
1079 | } |
1080 | } // anonymous namespace |
1081 | namespace { |
1082 | at::Tensor wrapper_SparseCsrCUDA_sparse_dim_to_sparse(const at::Tensor & self, int64_t sparse_dim) { |
1083 | c10::optional<Device> common_device = nullopt; |
1084 | (void)common_device; // Suppress unused variable warning |
1085 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_sparse_dim_to_sparse" , "self" ); |
1086 | const OptionalDeviceGuard device_guard(device_of(self)); |
1087 | return at::native::sparse_compressed_to_sparse(self, sparse_dim); |
1088 | } |
1089 | } // anonymous namespace |
1090 | namespace { |
1091 | at::Tensor wrapper_SparseCsrCUDA__to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
1092 | c10::optional<Device> common_device = nullopt; |
1093 | (void)common_device; // Suppress unused variable warning |
1094 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse" , "self" ); |
1095 | const OptionalDeviceGuard device_guard(device_of(self)); |
1096 | return at::native::sparse_compressed_to_sparse(self, layout, blocksize, dense_dim); |
1097 | } |
1098 | } // anonymous namespace |
1099 | namespace { |
1100 | at::Tensor wrapper_SparseCsrCUDA__to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) { |
1101 | c10::optional<Device> common_device = nullopt; |
1102 | (void)common_device; // Suppress unused variable warning |
1103 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse_csr" , "self" ); |
1104 | const OptionalDeviceGuard device_guard(device_of(self)); |
1105 | return at::native::sparse_compressed_to_sparse_csr(self, dense_dim); |
1106 | } |
1107 | } // anonymous namespace |
1108 | namespace { |
1109 | at::Tensor wrapper_SparseCsrCUDA__to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) { |
1110 | c10::optional<Device> common_device = nullopt; |
1111 | (void)common_device; // Suppress unused variable warning |
1112 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse_csc" , "self" ); |
1113 | const OptionalDeviceGuard device_guard(device_of(self)); |
1114 | return at::native::sparse_compressed_to_sparse_csc(self, dense_dim); |
1115 | } |
1116 | } // anonymous namespace |
1117 | namespace { |
1118 | at::Tensor wrapper_SparseCsrCUDA__to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
1119 | c10::optional<Device> common_device = nullopt; |
1120 | (void)common_device; // Suppress unused variable warning |
1121 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse_bsr" , "self" ); |
1122 | const OptionalDeviceGuard device_guard(device_of(self)); |
1123 | return at::native::sparse_compressed_to_sparse_bsr(self, blocksize, dense_dim); |
1124 | } |
1125 | } // anonymous namespace |
1126 | namespace { |
1127 | at::Tensor wrapper_SparseCsrCUDA__to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
1128 | c10::optional<Device> common_device = nullopt; |
1129 | (void)common_device; // Suppress unused variable warning |
1130 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__to_sparse_bsc" , "self" ); |
1131 | const OptionalDeviceGuard device_guard(device_of(self)); |
1132 | return at::native::sparse_compressed_to_sparse_bsc(self, blocksize, dense_dim); |
1133 | } |
1134 | } // anonymous namespace |
1135 | namespace { |
1136 | ::std::tuple<at::Tensor &,at::Tensor &> wrapper_SparseCsrCUDA_X_triangular_solve_out(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) { |
1137 | c10::optional<Device> common_device = nullopt; |
1138 | (void)common_device; // Suppress unused variable warning |
1139 | c10::impl::check_and_update_common_device(common_device, X, "wrapper_SparseCsrCUDA_X_triangular_solve_out" , "X" ); |
1140 | c10::impl::check_and_update_common_device(common_device, M, "wrapper_SparseCsrCUDA_X_triangular_solve_out" , "M" ); |
1141 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_X_triangular_solve_out" , "self" ); |
1142 | c10::impl::check_and_update_common_device(common_device, A, "wrapper_SparseCsrCUDA_X_triangular_solve_out" , "A" ); |
1143 | const OptionalDeviceGuard device_guard(device_of(self)); |
1144 | return at::native::triangular_solve_out_sparse_csr_cuda(self, A, upper, transpose, unitriangular, X, M); |
1145 | } |
1146 | } // anonymous namespace |
1147 | namespace { |
1148 | at::Tensor wrapper_SparseCsrCUDA__erfinv(const at::Tensor & self) { |
1149 | // No device check |
1150 | const OptionalDeviceGuard device_guard(device_of(self)); |
1151 | return at::native::erfinv_sparse_csr(self); |
1152 | } |
1153 | } // anonymous namespace |
1154 | namespace { |
1155 | at::Tensor & wrapper_SparseCsrCUDA_out_erfinv_out(const at::Tensor & self, at::Tensor & out) { |
1156 | // No device check |
1157 | const OptionalDeviceGuard device_guard(device_of(self)); |
1158 | return at::native::erfinv_sparse_csr_out(self, out); |
1159 | } |
1160 | } // anonymous namespace |
1161 | namespace { |
1162 | at::Tensor & wrapper_SparseCsrCUDA__erfinv_(at::Tensor & self) { |
1163 | // No device check |
1164 | const OptionalDeviceGuard device_guard(device_of(self)); |
1165 | return at::native::erfinv_sparse_csr_(self); |
1166 | } |
1167 | } // anonymous namespace |
1168 | namespace { |
1169 | at::Tensor wrapper_SparseCsrCUDA__sign(const at::Tensor & self) { |
1170 | // No device check |
1171 | const OptionalDeviceGuard device_guard(device_of(self)); |
1172 | return at::native::sign_sparse_csr(self); |
1173 | } |
1174 | } // anonymous namespace |
1175 | namespace { |
1176 | at::Tensor & wrapper_SparseCsrCUDA_out_sign_out(const at::Tensor & self, at::Tensor & out) { |
1177 | // No device check |
1178 | const OptionalDeviceGuard device_guard(device_of(self)); |
1179 | return at::native::sign_sparse_csr_out(self, out); |
1180 | } |
1181 | } // anonymous namespace |
1182 | namespace { |
1183 | at::Tensor & wrapper_SparseCsrCUDA__sign_(at::Tensor & self) { |
1184 | // No device check |
1185 | const OptionalDeviceGuard device_guard(device_of(self)); |
1186 | return at::native::sign_sparse_csr_(self); |
1187 | } |
1188 | } // anonymous namespace |
1189 | namespace { |
1190 | at::Tensor wrapper_SparseCsrCUDA__signbit(const at::Tensor & self) { |
1191 | c10::optional<Device> common_device = nullopt; |
1192 | (void)common_device; // Suppress unused variable warning |
1193 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__signbit" , "self" ); |
1194 | const OptionalDeviceGuard device_guard(device_of(self)); |
1195 | return at::native::signbit_sparse_csr(self); |
1196 | } |
1197 | } // anonymous namespace |
1198 | namespace { |
1199 | at::Tensor & wrapper_SparseCsrCUDA_out_signbit_out(const at::Tensor & self, at::Tensor & out) { |
1200 | c10::optional<Device> common_device = nullopt; |
1201 | (void)common_device; // Suppress unused variable warning |
1202 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_signbit_out" , "out" ); |
1203 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_signbit_out" , "self" ); |
1204 | const OptionalDeviceGuard device_guard(device_of(self)); |
1205 | return at::native::signbit_sparse_csr_out(self, out); |
1206 | } |
1207 | } // anonymous namespace |
1208 | namespace { |
1209 | at::Tensor & wrapper_SparseCsrCUDA__normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) { |
1210 | // No device check |
1211 | const OptionalDeviceGuard device_guard(device_of(self)); |
1212 | return at::native::normal_sparse_csr_(self, mean, std, generator); |
1213 | } |
1214 | } // anonymous namespace |
1215 | namespace { |
1216 | at::Tensor wrapper_SparseCsrCUDA__isinf(const at::Tensor & self) { |
1217 | // No device check |
1218 | // DeviceGuard omitted |
1219 | return at::native::isinf_sparse_csr(self); |
1220 | } |
1221 | } // anonymous namespace |
1222 | namespace { |
1223 | at::Tensor wrapper_SparseCsrCUDA__isposinf(const at::Tensor & self) { |
1224 | c10::optional<Device> common_device = nullopt; |
1225 | (void)common_device; // Suppress unused variable warning |
1226 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__isposinf" , "self" ); |
1227 | const OptionalDeviceGuard device_guard(device_of(self)); |
1228 | return at::native::isposinf_sparse_csr(self); |
1229 | } |
1230 | } // anonymous namespace |
1231 | namespace { |
1232 | at::Tensor & wrapper_SparseCsrCUDA_out_isposinf_out(const at::Tensor & self, at::Tensor & out) { |
1233 | c10::optional<Device> common_device = nullopt; |
1234 | (void)common_device; // Suppress unused variable warning |
1235 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_isposinf_out" , "out" ); |
1236 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_isposinf_out" , "self" ); |
1237 | const OptionalDeviceGuard device_guard(device_of(self)); |
1238 | return at::native::isposinf_sparse_csr_out(self, out); |
1239 | } |
1240 | } // anonymous namespace |
1241 | namespace { |
1242 | at::Tensor wrapper_SparseCsrCUDA__isneginf(const at::Tensor & self) { |
1243 | c10::optional<Device> common_device = nullopt; |
1244 | (void)common_device; // Suppress unused variable warning |
1245 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA__isneginf" , "self" ); |
1246 | const OptionalDeviceGuard device_guard(device_of(self)); |
1247 | return at::native::isneginf_sparse_csr(self); |
1248 | } |
1249 | } // anonymous namespace |
1250 | namespace { |
1251 | at::Tensor & wrapper_SparseCsrCUDA_out_isneginf_out(const at::Tensor & self, at::Tensor & out) { |
1252 | c10::optional<Device> common_device = nullopt; |
1253 | (void)common_device; // Suppress unused variable warning |
1254 | c10::impl::check_and_update_common_device(common_device, out, "wrapper_SparseCsrCUDA_out_isneginf_out" , "out" ); |
1255 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_out_isneginf_out" , "self" ); |
1256 | const OptionalDeviceGuard device_guard(device_of(self)); |
1257 | return at::native::isneginf_sparse_csr_out(self, out); |
1258 | } |
1259 | } // anonymous namespace |
1260 | namespace { |
1261 | at::Tensor wrapper_SparseCsrCUDA_int_select_copy(const at::Tensor & self, int64_t dim, c10::SymInt index) { |
1262 | c10::optional<Device> common_device = nullopt; |
1263 | (void)common_device; // Suppress unused variable warning |
1264 | c10::impl::check_and_update_common_device(common_device, self, "wrapper_SparseCsrCUDA_int_select_copy" , "self" ); |
1265 | const OptionalDeviceGuard device_guard(device_of(self)); |
1266 | return at::native::select_copy_sparse_csr(self, dim, index.expect_int()); |
1267 | } |
1268 | } // anonymous namespace |
1269 | TORCH_LIBRARY_IMPL(aten, SparseCsrCUDA, m) { |
1270 | m.impl("abs" , |
1271 | TORCH_FN(wrapper_SparseCsrCUDA__abs)); |
1272 | m.impl("abs.out" , |
1273 | TORCH_FN(wrapper_SparseCsrCUDA_out_abs_out)); |
1274 | m.impl("abs_" , |
1275 | TORCH_FN(wrapper_SparseCsrCUDA__abs_)); |
1276 | m.impl("angle" , |
1277 | TORCH_FN(wrapper_SparseCsrCUDA__angle)); |
1278 | m.impl("angle.out" , |
1279 | TORCH_FN(wrapper_SparseCsrCUDA_out_angle_out)); |
1280 | m.impl("sgn" , |
1281 | TORCH_FN(wrapper_SparseCsrCUDA__sgn)); |
1282 | m.impl("sgn.out" , |
1283 | TORCH_FN(wrapper_SparseCsrCUDA_out_sgn_out)); |
1284 | m.impl("sgn_" , |
1285 | TORCH_FN(wrapper_SparseCsrCUDA__sgn_)); |
1286 | m.impl("_conj_physical" , |
1287 | TORCH_FN(wrapper_SparseCsrCUDA___conj_physical)); |
1288 | m.impl("conj_physical.out" , |
1289 | TORCH_FN(wrapper_SparseCsrCUDA_out_conj_physical_out)); |
1290 | m.impl("conj_physical_" , |
1291 | TORCH_FN(wrapper_SparseCsrCUDA__conj_physical_)); |
1292 | m.impl("add.Tensor" , |
1293 | TORCH_FN(wrapper_SparseCsrCUDA_Tensor_add)); |
1294 | m.impl("add.out" , |
1295 | TORCH_FN(wrapper_SparseCsrCUDA_out_add_out)); |
1296 | m.impl("add_.Tensor" , |
1297 | TORCH_FN(wrapper_SparseCsrCUDA_Tensor_add_)); |
1298 | m.impl("addmv.out" , |
1299 | TORCH_FN(wrapper_SparseCsrCUDA_out_addmv_out)); |
1300 | m.impl("asinh" , |
1301 | TORCH_FN(wrapper_SparseCsrCUDA__asinh)); |
1302 | m.impl("asinh.out" , |
1303 | TORCH_FN(wrapper_SparseCsrCUDA_out_asinh_out)); |
1304 | m.impl("asinh_" , |
1305 | TORCH_FN(wrapper_SparseCsrCUDA__asinh_)); |
1306 | m.impl("atanh" , |
1307 | TORCH_FN(wrapper_SparseCsrCUDA__atanh)); |
1308 | m.impl("atanh.out" , |
1309 | TORCH_FN(wrapper_SparseCsrCUDA_out_atanh_out)); |
1310 | m.impl("atanh_" , |
1311 | TORCH_FN(wrapper_SparseCsrCUDA__atanh_)); |
1312 | m.impl("asin" , |
1313 | TORCH_FN(wrapper_SparseCsrCUDA__asin)); |
1314 | m.impl("asin.out" , |
1315 | TORCH_FN(wrapper_SparseCsrCUDA_out_asin_out)); |
1316 | m.impl("asin_" , |
1317 | TORCH_FN(wrapper_SparseCsrCUDA__asin_)); |
1318 | m.impl("atan" , |
1319 | TORCH_FN(wrapper_SparseCsrCUDA__atan)); |
1320 | m.impl("atan.out" , |
1321 | TORCH_FN(wrapper_SparseCsrCUDA_out_atan_out)); |
1322 | m.impl("atan_" , |
1323 | TORCH_FN(wrapper_SparseCsrCUDA__atan_)); |
1324 | m.impl("baddbmm.out" , |
1325 | TORCH_FN(wrapper_SparseCsrCUDA_out_baddbmm_out)); |
1326 | m.impl("bmm.out" , |
1327 | TORCH_FN(wrapper_SparseCsrCUDA_out_bmm_out)); |
1328 | m.impl("ceil" , |
1329 | TORCH_FN(wrapper_SparseCsrCUDA__ceil)); |
1330 | m.impl("ceil.out" , |
1331 | TORCH_FN(wrapper_SparseCsrCUDA_out_ceil_out)); |
1332 | m.impl("ceil_" , |
1333 | TORCH_FN(wrapper_SparseCsrCUDA__ceil_)); |
1334 | m.impl("copy_" , |
1335 | TORCH_FN(wrapper_SparseCsrCUDA__copy_)); |
1336 | m.impl("empty.memory_format" , |
1337 | TORCH_FN(wrapper_SparseCsrCUDA_memory_format_empty)); |
1338 | m.impl("resize_" , |
1339 | TORCH_FN(wrapper_SparseCsrCUDA__resize_)); |
1340 | m.impl("empty_like" , |
1341 | TORCH_FN(wrapper_SparseCsrCUDA__empty_like)); |
1342 | m.impl("erf" , |
1343 | TORCH_FN(wrapper_SparseCsrCUDA__erf)); |
1344 | m.impl("erf.out" , |
1345 | TORCH_FN(wrapper_SparseCsrCUDA_out_erf_out)); |
1346 | m.impl("erf_" , |
1347 | TORCH_FN(wrapper_SparseCsrCUDA__erf_)); |
1348 | m.impl("expm1" , |
1349 | TORCH_FN(wrapper_SparseCsrCUDA__expm1)); |
1350 | m.impl("expm1.out" , |
1351 | TORCH_FN(wrapper_SparseCsrCUDA_out_expm1_out)); |
1352 | m.impl("expm1_" , |
1353 | TORCH_FN(wrapper_SparseCsrCUDA__expm1_)); |
1354 | m.impl("fill_.Scalar" , |
1355 | TORCH_FN(wrapper_SparseCsrCUDA_Scalar_fill_)); |
1356 | m.impl("floor" , |
1357 | TORCH_FN(wrapper_SparseCsrCUDA__floor)); |
1358 | m.impl("floor.out" , |
1359 | TORCH_FN(wrapper_SparseCsrCUDA_out_floor_out)); |
1360 | m.impl("floor_" , |
1361 | TORCH_FN(wrapper_SparseCsrCUDA__floor_)); |
1362 | m.impl("frac" , |
1363 | TORCH_FN(wrapper_SparseCsrCUDA__frac)); |
1364 | m.impl("frac.out" , |
1365 | TORCH_FN(wrapper_SparseCsrCUDA_out_frac_out)); |
1366 | m.impl("frac_" , |
1367 | TORCH_FN(wrapper_SparseCsrCUDA__frac_)); |
1368 | m.impl("isnan" , |
1369 | TORCH_FN(wrapper_SparseCsrCUDA__isnan)); |
1370 | m.impl("log1p" , |
1371 | TORCH_FN(wrapper_SparseCsrCUDA__log1p)); |
1372 | m.impl("log1p.out" , |
1373 | TORCH_FN(wrapper_SparseCsrCUDA_out_log1p_out)); |
1374 | m.impl("log1p_" , |
1375 | TORCH_FN(wrapper_SparseCsrCUDA__log1p_)); |
1376 | m.impl("mm" , |
1377 | TORCH_FN(wrapper_SparseCsrCUDA__mm)); |
1378 | m.impl("mm.out" , |
1379 | TORCH_FN(wrapper_SparseCsrCUDA_out_mm_out)); |
1380 | m.impl("mul.Tensor" , |
1381 | TORCH_FN(wrapper_SparseCsrCUDA_Tensor_mul)); |
1382 | m.impl("mul.out" , |
1383 | TORCH_FN(wrapper_SparseCsrCUDA_out_mul_out)); |
1384 | m.impl("mul_.Tensor" , |
1385 | TORCH_FN(wrapper_SparseCsrCUDA_Tensor_mul_)); |
1386 | m.impl("mul.Scalar" , |
1387 | TORCH_FN(wrapper_SparseCsrCUDA_Scalar_mul)); |
1388 | m.impl("mul_.Scalar" , |
1389 | TORCH_FN(wrapper_SparseCsrCUDA_Scalar_mul_)); |
1390 | m.impl("rad2deg" , |
1391 | TORCH_FN(wrapper_SparseCsrCUDA__rad2deg)); |
1392 | m.impl("rad2deg.out" , |
1393 | TORCH_FN(wrapper_SparseCsrCUDA_out_rad2deg_out)); |
1394 | m.impl("rad2deg_" , |
1395 | TORCH_FN(wrapper_SparseCsrCUDA__rad2deg_)); |
1396 | m.impl("deg2rad" , |
1397 | TORCH_FN(wrapper_SparseCsrCUDA__deg2rad)); |
1398 | m.impl("deg2rad.out" , |
1399 | TORCH_FN(wrapper_SparseCsrCUDA_out_deg2rad_out)); |
1400 | m.impl("deg2rad_" , |
1401 | TORCH_FN(wrapper_SparseCsrCUDA__deg2rad_)); |
1402 | m.impl("neg" , |
1403 | TORCH_FN(wrapper_SparseCsrCUDA__neg)); |
1404 | m.impl("neg.out" , |
1405 | TORCH_FN(wrapper_SparseCsrCUDA_out_neg_out)); |
1406 | m.impl("neg_" , |
1407 | TORCH_FN(wrapper_SparseCsrCUDA__neg_)); |
1408 | m.impl("round" , |
1409 | TORCH_FN(wrapper_SparseCsrCUDA__round)); |
1410 | m.impl("round.out" , |
1411 | TORCH_FN(wrapper_SparseCsrCUDA_out_round_out)); |
1412 | m.impl("round_" , |
1413 | TORCH_FN(wrapper_SparseCsrCUDA__round_)); |
1414 | m.impl("relu" , |
1415 | TORCH_FN(wrapper_SparseCsrCUDA__relu)); |
1416 | m.impl("relu_" , |
1417 | TORCH_FN(wrapper_SparseCsrCUDA__relu_)); |
1418 | m.impl("select.int" , |
1419 | TORCH_FN(wrapper_SparseCsrCUDA_int_select)); |
1420 | m.impl("sin" , |
1421 | TORCH_FN(wrapper_SparseCsrCUDA__sin)); |
1422 | m.impl("sin.out" , |
1423 | TORCH_FN(wrapper_SparseCsrCUDA_out_sin_out)); |
1424 | m.impl("sin_" , |
1425 | TORCH_FN(wrapper_SparseCsrCUDA__sin_)); |
1426 | m.impl("sinh" , |
1427 | TORCH_FN(wrapper_SparseCsrCUDA__sinh)); |
1428 | m.impl("sinh.out" , |
1429 | TORCH_FN(wrapper_SparseCsrCUDA_out_sinh_out)); |
1430 | m.impl("sinh_" , |
1431 | TORCH_FN(wrapper_SparseCsrCUDA__sinh_)); |
1432 | m.impl("sum" , |
1433 | TORCH_FN(wrapper_SparseCsrCUDA__sum)); |
1434 | m.impl("sqrt" , |
1435 | TORCH_FN(wrapper_SparseCsrCUDA__sqrt)); |
1436 | m.impl("sqrt.out" , |
1437 | TORCH_FN(wrapper_SparseCsrCUDA_out_sqrt_out)); |
1438 | m.impl("sqrt_" , |
1439 | TORCH_FN(wrapper_SparseCsrCUDA__sqrt_)); |
1440 | m.impl("tan" , |
1441 | TORCH_FN(wrapper_SparseCsrCUDA__tan)); |
1442 | m.impl("tan.out" , |
1443 | TORCH_FN(wrapper_SparseCsrCUDA_out_tan_out)); |
1444 | m.impl("tan_" , |
1445 | TORCH_FN(wrapper_SparseCsrCUDA__tan_)); |
1446 | m.impl("tanh" , |
1447 | TORCH_FN(wrapper_SparseCsrCUDA__tanh)); |
1448 | m.impl("tanh.out" , |
1449 | TORCH_FN(wrapper_SparseCsrCUDA_out_tanh_out)); |
1450 | m.impl("tanh_" , |
1451 | TORCH_FN(wrapper_SparseCsrCUDA__tanh_)); |
1452 | m.impl("threshold_backward" , |
1453 | TORCH_FN(wrapper_SparseCsrCUDA__threshold_backward)); |
1454 | m.impl("threshold_backward.grad_input" , |
1455 | TORCH_FN(wrapper_SparseCsrCUDA_grad_input_threshold_backward_out)); |
1456 | m.impl("trunc" , |
1457 | TORCH_FN(wrapper_SparseCsrCUDA__trunc)); |
1458 | m.impl("trunc.out" , |
1459 | TORCH_FN(wrapper_SparseCsrCUDA_out_trunc_out)); |
1460 | m.impl("trunc_" , |
1461 | TORCH_FN(wrapper_SparseCsrCUDA__trunc_)); |
1462 | m.impl("_sparse_csr_sum.dim_dtype" , |
1463 | TORCH_FN(wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_sum)); |
1464 | m.impl("_sparse_csr_prod.dim_dtype" , |
1465 | TORCH_FN(wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_prod)); |
1466 | m.impl("clone" , |
1467 | TORCH_FN(wrapper_SparseCsrCUDA__clone)); |
1468 | m.impl("resize_as_sparse_" , |
1469 | TORCH_FN(wrapper_SparseCsrCUDA__resize_as_sparse_)); |
1470 | m.impl("zero_" , |
1471 | TORCH_FN(wrapper_SparseCsrCUDA__zero_)); |
1472 | m.impl("sparse_sampled_addmm" , |
1473 | TORCH_FN(wrapper_SparseCsrCUDA__sparse_sampled_addmm)); |
1474 | m.impl("sparse_sampled_addmm.out" , |
1475 | TORCH_FN(wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out)); |
1476 | m.impl("addmm" , |
1477 | TORCH_FN(wrapper_SparseCsrCUDA__addmm)); |
1478 | m.impl("addmm.out" , |
1479 | TORCH_FN(wrapper_SparseCsrCUDA_out_addmm_out)); |
1480 | m.impl("sparse_mask" , |
1481 | TORCH_FN(wrapper_SparseCsrCUDA__sparse_mask)); |
1482 | m.impl("_to_dense" , |
1483 | TORCH_FN(wrapper_SparseCsrCUDA___to_dense)); |
1484 | m.impl("sparse_dim" , |
1485 | TORCH_FN(wrapper_SparseCsrCUDA__sparse_dim)); |
1486 | m.impl("dense_dim" , |
1487 | TORCH_FN(wrapper_SparseCsrCUDA__dense_dim)); |
1488 | m.impl("_nnz" , |
1489 | TORCH_FN(wrapper_SparseCsrCUDA___nnz)); |
1490 | m.impl("values" , |
1491 | TORCH_FN(wrapper_SparseCsrCUDA__values)); |
1492 | m.impl("crow_indices" , |
1493 | TORCH_FN(wrapper_SparseCsrCUDA__crow_indices)); |
1494 | m.impl("col_indices" , |
1495 | TORCH_FN(wrapper_SparseCsrCUDA__col_indices)); |
1496 | m.impl("ccol_indices" , |
1497 | TORCH_FN(wrapper_SparseCsrCUDA__ccol_indices)); |
1498 | m.impl("row_indices" , |
1499 | TORCH_FN(wrapper_SparseCsrCUDA__row_indices)); |
1500 | m.impl("to_sparse.sparse_dim" , |
1501 | TORCH_FN(wrapper_SparseCsrCUDA_sparse_dim_to_sparse)); |
1502 | m.impl("to_sparse" , |
1503 | TORCH_FN(wrapper_SparseCsrCUDA__to_sparse)); |
1504 | m.impl("to_sparse_csr" , |
1505 | TORCH_FN(wrapper_SparseCsrCUDA__to_sparse_csr)); |
1506 | m.impl("to_sparse_csc" , |
1507 | TORCH_FN(wrapper_SparseCsrCUDA__to_sparse_csc)); |
1508 | m.impl("to_sparse_bsr" , |
1509 | TORCH_FN(wrapper_SparseCsrCUDA__to_sparse_bsr)); |
1510 | m.impl("to_sparse_bsc" , |
1511 | TORCH_FN(wrapper_SparseCsrCUDA__to_sparse_bsc)); |
1512 | m.impl("triangular_solve.X" , |
1513 | TORCH_FN(wrapper_SparseCsrCUDA_X_triangular_solve_out)); |
1514 | m.impl("erfinv" , |
1515 | TORCH_FN(wrapper_SparseCsrCUDA__erfinv)); |
1516 | m.impl("erfinv.out" , |
1517 | TORCH_FN(wrapper_SparseCsrCUDA_out_erfinv_out)); |
1518 | m.impl("erfinv_" , |
1519 | TORCH_FN(wrapper_SparseCsrCUDA__erfinv_)); |
1520 | m.impl("sign" , |
1521 | TORCH_FN(wrapper_SparseCsrCUDA__sign)); |
1522 | m.impl("sign.out" , |
1523 | TORCH_FN(wrapper_SparseCsrCUDA_out_sign_out)); |
1524 | m.impl("sign_" , |
1525 | TORCH_FN(wrapper_SparseCsrCUDA__sign_)); |
1526 | m.impl("signbit" , |
1527 | TORCH_FN(wrapper_SparseCsrCUDA__signbit)); |
1528 | m.impl("signbit.out" , |
1529 | TORCH_FN(wrapper_SparseCsrCUDA_out_signbit_out)); |
1530 | m.impl("normal_" , |
1531 | TORCH_FN(wrapper_SparseCsrCUDA__normal_)); |
1532 | m.impl("isinf" , |
1533 | TORCH_FN(wrapper_SparseCsrCUDA__isinf)); |
1534 | m.impl("isposinf" , |
1535 | TORCH_FN(wrapper_SparseCsrCUDA__isposinf)); |
1536 | m.impl("isposinf.out" , |
1537 | TORCH_FN(wrapper_SparseCsrCUDA_out_isposinf_out)); |
1538 | m.impl("isneginf" , |
1539 | TORCH_FN(wrapper_SparseCsrCUDA__isneginf)); |
1540 | m.impl("isneginf.out" , |
1541 | TORCH_FN(wrapper_SparseCsrCUDA_out_isneginf_out)); |
1542 | m.impl("select_copy.int" , |
1543 | TORCH_FN(wrapper_SparseCsrCUDA_int_select_copy)); |
1544 | }; |
1545 | } // anonymous namespace |
1546 | namespace sparsecsrcuda { |
1547 | at::Tensor abs(const at::Tensor & self) { |
1548 | return wrapper_SparseCsrCUDA__abs(self); |
1549 | } |
1550 | at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) { |
1551 | return wrapper_SparseCsrCUDA_out_abs_out(self, out); |
1552 | } |
1553 | at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) { |
1554 | return wrapper_SparseCsrCUDA_out_abs_out(self, out); |
1555 | } |
1556 | at::Tensor & abs_(at::Tensor & self) { |
1557 | return wrapper_SparseCsrCUDA__abs_(self); |
1558 | } |
1559 | at::Tensor angle(const at::Tensor & self) { |
1560 | return wrapper_SparseCsrCUDA__angle(self); |
1561 | } |
1562 | at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self) { |
1563 | return wrapper_SparseCsrCUDA_out_angle_out(self, out); |
1564 | } |
1565 | at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out) { |
1566 | return wrapper_SparseCsrCUDA_out_angle_out(self, out); |
1567 | } |
1568 | at::Tensor sgn(const at::Tensor & self) { |
1569 | return wrapper_SparseCsrCUDA__sgn(self); |
1570 | } |
1571 | at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) { |
1572 | return wrapper_SparseCsrCUDA_out_sgn_out(self, out); |
1573 | } |
1574 | at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) { |
1575 | return wrapper_SparseCsrCUDA_out_sgn_out(self, out); |
1576 | } |
1577 | at::Tensor & sgn_(at::Tensor & self) { |
1578 | return wrapper_SparseCsrCUDA__sgn_(self); |
1579 | } |
1580 | at::Tensor _conj_physical(const at::Tensor & self) { |
1581 | return wrapper_SparseCsrCUDA___conj_physical(self); |
1582 | } |
1583 | at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) { |
1584 | return wrapper_SparseCsrCUDA_out_conj_physical_out(self, out); |
1585 | } |
1586 | at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) { |
1587 | return wrapper_SparseCsrCUDA_out_conj_physical_out(self, out); |
1588 | } |
1589 | at::Tensor & conj_physical_(at::Tensor & self) { |
1590 | return wrapper_SparseCsrCUDA__conj_physical_(self); |
1591 | } |
1592 | at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1593 | return wrapper_SparseCsrCUDA_Tensor_add(self, other, alpha); |
1594 | } |
1595 | at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1596 | return wrapper_SparseCsrCUDA_out_add_out(self, other, alpha, out); |
1597 | } |
1598 | at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
1599 | return wrapper_SparseCsrCUDA_out_add_out(self, other, alpha, out); |
1600 | } |
1601 | at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1602 | return wrapper_SparseCsrCUDA_Tensor_add_(self, other, alpha); |
1603 | } |
1604 | at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) { |
1605 | return wrapper_SparseCsrCUDA_out_addmv_out(self, mat, vec, beta, alpha, out); |
1606 | } |
1607 | at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
1608 | return wrapper_SparseCsrCUDA_out_addmv_out(self, mat, vec, beta, alpha, out); |
1609 | } |
1610 | at::Tensor asinh(const at::Tensor & self) { |
1611 | return wrapper_SparseCsrCUDA__asinh(self); |
1612 | } |
1613 | at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) { |
1614 | return wrapper_SparseCsrCUDA_out_asinh_out(self, out); |
1615 | } |
1616 | at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) { |
1617 | return wrapper_SparseCsrCUDA_out_asinh_out(self, out); |
1618 | } |
1619 | at::Tensor & asinh_(at::Tensor & self) { |
1620 | return wrapper_SparseCsrCUDA__asinh_(self); |
1621 | } |
1622 | at::Tensor atanh(const at::Tensor & self) { |
1623 | return wrapper_SparseCsrCUDA__atanh(self); |
1624 | } |
1625 | at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) { |
1626 | return wrapper_SparseCsrCUDA_out_atanh_out(self, out); |
1627 | } |
1628 | at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) { |
1629 | return wrapper_SparseCsrCUDA_out_atanh_out(self, out); |
1630 | } |
1631 | at::Tensor & atanh_(at::Tensor & self) { |
1632 | return wrapper_SparseCsrCUDA__atanh_(self); |
1633 | } |
1634 | at::Tensor asin(const at::Tensor & self) { |
1635 | return wrapper_SparseCsrCUDA__asin(self); |
1636 | } |
1637 | at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) { |
1638 | return wrapper_SparseCsrCUDA_out_asin_out(self, out); |
1639 | } |
1640 | at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) { |
1641 | return wrapper_SparseCsrCUDA_out_asin_out(self, out); |
1642 | } |
1643 | at::Tensor & asin_(at::Tensor & self) { |
1644 | return wrapper_SparseCsrCUDA__asin_(self); |
1645 | } |
1646 | at::Tensor atan(const at::Tensor & self) { |
1647 | return wrapper_SparseCsrCUDA__atan(self); |
1648 | } |
1649 | at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) { |
1650 | return wrapper_SparseCsrCUDA_out_atan_out(self, out); |
1651 | } |
1652 | at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) { |
1653 | return wrapper_SparseCsrCUDA_out_atan_out(self, out); |
1654 | } |
1655 | at::Tensor & atan_(at::Tensor & self) { |
1656 | return wrapper_SparseCsrCUDA__atan_(self); |
1657 | } |
1658 | at::Tensor & baddbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) { |
1659 | return wrapper_SparseCsrCUDA_out_baddbmm_out(self, batch1, batch2, beta, alpha, out); |
1660 | } |
1661 | at::Tensor & baddbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
1662 | return wrapper_SparseCsrCUDA_out_baddbmm_out(self, batch1, batch2, beta, alpha, out); |
1663 | } |
1664 | at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) { |
1665 | return wrapper_SparseCsrCUDA_out_bmm_out(self, mat2, out); |
1666 | } |
1667 | at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
1668 | return wrapper_SparseCsrCUDA_out_bmm_out(self, mat2, out); |
1669 | } |
1670 | at::Tensor ceil(const at::Tensor & self) { |
1671 | return wrapper_SparseCsrCUDA__ceil(self); |
1672 | } |
1673 | at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) { |
1674 | return wrapper_SparseCsrCUDA_out_ceil_out(self, out); |
1675 | } |
1676 | at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) { |
1677 | return wrapper_SparseCsrCUDA_out_ceil_out(self, out); |
1678 | } |
1679 | at::Tensor & ceil_(at::Tensor & self) { |
1680 | return wrapper_SparseCsrCUDA__ceil_(self); |
1681 | } |
1682 | at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) { |
1683 | return wrapper_SparseCsrCUDA__copy_(self, src, non_blocking); |
1684 | } |
1685 | at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
1686 | return wrapper_SparseCsrCUDA_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1687 | } |
1688 | at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
1689 | return wrapper_SparseCsrCUDA_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format); |
1690 | } |
1691 | at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
1692 | return wrapper_SparseCsrCUDA_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1693 | } |
1694 | at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
1695 | return wrapper_SparseCsrCUDA_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format); |
1696 | } |
1697 | const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) { |
1698 | return wrapper_SparseCsrCUDA__resize_(self, c10::fromIntArrayRefSlow(size), memory_format); |
1699 | } |
1700 | const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) { |
1701 | return wrapper_SparseCsrCUDA__resize_(self, size, memory_format); |
1702 | } |
1703 | at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
1704 | return wrapper_SparseCsrCUDA__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1705 | } |
1706 | at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
1707 | return wrapper_SparseCsrCUDA__empty_like(self, dtype, layout, device, pin_memory, memory_format); |
1708 | } |
1709 | at::Tensor erf(const at::Tensor & self) { |
1710 | return wrapper_SparseCsrCUDA__erf(self); |
1711 | } |
1712 | at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) { |
1713 | return wrapper_SparseCsrCUDA_out_erf_out(self, out); |
1714 | } |
1715 | at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) { |
1716 | return wrapper_SparseCsrCUDA_out_erf_out(self, out); |
1717 | } |
1718 | at::Tensor & erf_(at::Tensor & self) { |
1719 | return wrapper_SparseCsrCUDA__erf_(self); |
1720 | } |
1721 | at::Tensor expm1(const at::Tensor & self) { |
1722 | return wrapper_SparseCsrCUDA__expm1(self); |
1723 | } |
1724 | at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) { |
1725 | return wrapper_SparseCsrCUDA_out_expm1_out(self, out); |
1726 | } |
1727 | at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) { |
1728 | return wrapper_SparseCsrCUDA_out_expm1_out(self, out); |
1729 | } |
1730 | at::Tensor & expm1_(at::Tensor & self) { |
1731 | return wrapper_SparseCsrCUDA__expm1_(self); |
1732 | } |
1733 | at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) { |
1734 | return wrapper_SparseCsrCUDA_Scalar_fill_(self, value); |
1735 | } |
1736 | at::Tensor floor(const at::Tensor & self) { |
1737 | return wrapper_SparseCsrCUDA__floor(self); |
1738 | } |
1739 | at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) { |
1740 | return wrapper_SparseCsrCUDA_out_floor_out(self, out); |
1741 | } |
1742 | at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) { |
1743 | return wrapper_SparseCsrCUDA_out_floor_out(self, out); |
1744 | } |
1745 | at::Tensor & floor_(at::Tensor & self) { |
1746 | return wrapper_SparseCsrCUDA__floor_(self); |
1747 | } |
1748 | at::Tensor frac(const at::Tensor & self) { |
1749 | return wrapper_SparseCsrCUDA__frac(self); |
1750 | } |
1751 | at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) { |
1752 | return wrapper_SparseCsrCUDA_out_frac_out(self, out); |
1753 | } |
1754 | at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) { |
1755 | return wrapper_SparseCsrCUDA_out_frac_out(self, out); |
1756 | } |
1757 | at::Tensor & frac_(at::Tensor & self) { |
1758 | return wrapper_SparseCsrCUDA__frac_(self); |
1759 | } |
1760 | at::Tensor isnan(const at::Tensor & self) { |
1761 | return wrapper_SparseCsrCUDA__isnan(self); |
1762 | } |
1763 | at::Tensor log1p(const at::Tensor & self) { |
1764 | return wrapper_SparseCsrCUDA__log1p(self); |
1765 | } |
1766 | at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) { |
1767 | return wrapper_SparseCsrCUDA_out_log1p_out(self, out); |
1768 | } |
1769 | at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) { |
1770 | return wrapper_SparseCsrCUDA_out_log1p_out(self, out); |
1771 | } |
1772 | at::Tensor & log1p_(at::Tensor & self) { |
1773 | return wrapper_SparseCsrCUDA__log1p_(self); |
1774 | } |
1775 | at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) { |
1776 | return wrapper_SparseCsrCUDA__mm(self, mat2); |
1777 | } |
1778 | at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) { |
1779 | return wrapper_SparseCsrCUDA_out_mm_out(self, mat2, out); |
1780 | } |
1781 | at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
1782 | return wrapper_SparseCsrCUDA_out_mm_out(self, mat2, out); |
1783 | } |
1784 | at::Tensor mul(const at::Tensor & self, const at::Tensor & other) { |
1785 | return wrapper_SparseCsrCUDA_Tensor_mul(self, other); |
1786 | } |
1787 | at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
1788 | return wrapper_SparseCsrCUDA_out_mul_out(self, other, out); |
1789 | } |
1790 | at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
1791 | return wrapper_SparseCsrCUDA_out_mul_out(self, other, out); |
1792 | } |
1793 | at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) { |
1794 | return wrapper_SparseCsrCUDA_Tensor_mul_(self, other); |
1795 | } |
1796 | at::Tensor mul(const at::Tensor & self, const at::Scalar & other) { |
1797 | return wrapper_SparseCsrCUDA_Scalar_mul(self, other); |
1798 | } |
1799 | at::Tensor & mul_(at::Tensor & self, const at::Scalar & other) { |
1800 | return wrapper_SparseCsrCUDA_Scalar_mul_(self, other); |
1801 | } |
1802 | at::Tensor rad2deg(const at::Tensor & self) { |
1803 | return wrapper_SparseCsrCUDA__rad2deg(self); |
1804 | } |
1805 | at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) { |
1806 | return wrapper_SparseCsrCUDA_out_rad2deg_out(self, out); |
1807 | } |
1808 | at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) { |
1809 | return wrapper_SparseCsrCUDA_out_rad2deg_out(self, out); |
1810 | } |
1811 | at::Tensor & rad2deg_(at::Tensor & self) { |
1812 | return wrapper_SparseCsrCUDA__rad2deg_(self); |
1813 | } |
1814 | at::Tensor deg2rad(const at::Tensor & self) { |
1815 | return wrapper_SparseCsrCUDA__deg2rad(self); |
1816 | } |
1817 | at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) { |
1818 | return wrapper_SparseCsrCUDA_out_deg2rad_out(self, out); |
1819 | } |
1820 | at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) { |
1821 | return wrapper_SparseCsrCUDA_out_deg2rad_out(self, out); |
1822 | } |
1823 | at::Tensor & deg2rad_(at::Tensor & self) { |
1824 | return wrapper_SparseCsrCUDA__deg2rad_(self); |
1825 | } |
1826 | at::Tensor neg(const at::Tensor & self) { |
1827 | return wrapper_SparseCsrCUDA__neg(self); |
1828 | } |
1829 | at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) { |
1830 | return wrapper_SparseCsrCUDA_out_neg_out(self, out); |
1831 | } |
1832 | at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) { |
1833 | return wrapper_SparseCsrCUDA_out_neg_out(self, out); |
1834 | } |
1835 | at::Tensor & neg_(at::Tensor & self) { |
1836 | return wrapper_SparseCsrCUDA__neg_(self); |
1837 | } |
1838 | at::Tensor round(const at::Tensor & self) { |
1839 | return wrapper_SparseCsrCUDA__round(self); |
1840 | } |
1841 | at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) { |
1842 | return wrapper_SparseCsrCUDA_out_round_out(self, out); |
1843 | } |
1844 | at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) { |
1845 | return wrapper_SparseCsrCUDA_out_round_out(self, out); |
1846 | } |
1847 | at::Tensor & round_(at::Tensor & self) { |
1848 | return wrapper_SparseCsrCUDA__round_(self); |
1849 | } |
1850 | at::Tensor relu(const at::Tensor & self) { |
1851 | return wrapper_SparseCsrCUDA__relu(self); |
1852 | } |
1853 | at::Tensor & relu_(at::Tensor & self) { |
1854 | return wrapper_SparseCsrCUDA__relu_(self); |
1855 | } |
1856 | at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) { |
1857 | return wrapper_SparseCsrCUDA_int_select(self, dim, index); |
1858 | } |
1859 | at::Tensor select_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) { |
1860 | return wrapper_SparseCsrCUDA_int_select(self, dim, index); |
1861 | } |
1862 | at::Tensor sin(const at::Tensor & self) { |
1863 | return wrapper_SparseCsrCUDA__sin(self); |
1864 | } |
1865 | at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) { |
1866 | return wrapper_SparseCsrCUDA_out_sin_out(self, out); |
1867 | } |
1868 | at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) { |
1869 | return wrapper_SparseCsrCUDA_out_sin_out(self, out); |
1870 | } |
1871 | at::Tensor & sin_(at::Tensor & self) { |
1872 | return wrapper_SparseCsrCUDA__sin_(self); |
1873 | } |
1874 | at::Tensor sinh(const at::Tensor & self) { |
1875 | return wrapper_SparseCsrCUDA__sinh(self); |
1876 | } |
1877 | at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) { |
1878 | return wrapper_SparseCsrCUDA_out_sinh_out(self, out); |
1879 | } |
1880 | at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) { |
1881 | return wrapper_SparseCsrCUDA_out_sinh_out(self, out); |
1882 | } |
1883 | at::Tensor & sinh_(at::Tensor & self) { |
1884 | return wrapper_SparseCsrCUDA__sinh_(self); |
1885 | } |
1886 | at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) { |
1887 | return wrapper_SparseCsrCUDA__sum(self, dtype); |
1888 | } |
1889 | at::Tensor sqrt(const at::Tensor & self) { |
1890 | return wrapper_SparseCsrCUDA__sqrt(self); |
1891 | } |
1892 | at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) { |
1893 | return wrapper_SparseCsrCUDA_out_sqrt_out(self, out); |
1894 | } |
1895 | at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) { |
1896 | return wrapper_SparseCsrCUDA_out_sqrt_out(self, out); |
1897 | } |
1898 | at::Tensor & sqrt_(at::Tensor & self) { |
1899 | return wrapper_SparseCsrCUDA__sqrt_(self); |
1900 | } |
1901 | at::Tensor tan(const at::Tensor & self) { |
1902 | return wrapper_SparseCsrCUDA__tan(self); |
1903 | } |
1904 | at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) { |
1905 | return wrapper_SparseCsrCUDA_out_tan_out(self, out); |
1906 | } |
1907 | at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) { |
1908 | return wrapper_SparseCsrCUDA_out_tan_out(self, out); |
1909 | } |
1910 | at::Tensor & tan_(at::Tensor & self) { |
1911 | return wrapper_SparseCsrCUDA__tan_(self); |
1912 | } |
1913 | at::Tensor tanh(const at::Tensor & self) { |
1914 | return wrapper_SparseCsrCUDA__tanh(self); |
1915 | } |
1916 | at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) { |
1917 | return wrapper_SparseCsrCUDA_out_tanh_out(self, out); |
1918 | } |
1919 | at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) { |
1920 | return wrapper_SparseCsrCUDA_out_tanh_out(self, out); |
1921 | } |
1922 | at::Tensor & tanh_(at::Tensor & self) { |
1923 | return wrapper_SparseCsrCUDA__tanh_(self); |
1924 | } |
1925 | at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { |
1926 | return wrapper_SparseCsrCUDA__threshold_backward(grad_output, self, threshold); |
1927 | } |
1928 | at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { |
1929 | return wrapper_SparseCsrCUDA_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input); |
1930 | } |
1931 | at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) { |
1932 | return wrapper_SparseCsrCUDA_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input); |
1933 | } |
1934 | at::Tensor trunc(const at::Tensor & self) { |
1935 | return wrapper_SparseCsrCUDA__trunc(self); |
1936 | } |
1937 | at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) { |
1938 | return wrapper_SparseCsrCUDA_out_trunc_out(self, out); |
1939 | } |
1940 | at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) { |
1941 | return wrapper_SparseCsrCUDA_out_trunc_out(self, out); |
1942 | } |
1943 | at::Tensor & trunc_(at::Tensor & self) { |
1944 | return wrapper_SparseCsrCUDA__trunc_(self); |
1945 | } |
1946 | at::Tensor _sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
1947 | return wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_sum(self, dim, keepdim, dtype); |
1948 | } |
1949 | at::Tensor _sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
1950 | return wrapper_SparseCsrCUDA_dim_dtype__sparse_csr_prod(self, dim, keepdim, dtype); |
1951 | } |
1952 | at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) { |
1953 | return wrapper_SparseCsrCUDA__clone(self, memory_format); |
1954 | } |
1955 | const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) { |
1956 | return wrapper_SparseCsrCUDA__resize_as_sparse_(self, the_template); |
1957 | } |
1958 | at::Tensor & zero_(at::Tensor & self) { |
1959 | return wrapper_SparseCsrCUDA__zero_(self); |
1960 | } |
1961 | at::Tensor sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
1962 | return wrapper_SparseCsrCUDA__sparse_sampled_addmm(self, mat1, mat2, beta, alpha); |
1963 | } |
1964 | at::Tensor & sparse_sampled_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
1965 | return wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out); |
1966 | } |
1967 | at::Tensor & sparse_sampled_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
1968 | return wrapper_SparseCsrCUDA_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out); |
1969 | } |
1970 | at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
1971 | return wrapper_SparseCsrCUDA__addmm(self, mat1, mat2, beta, alpha); |
1972 | } |
1973 | at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { |
1974 | return wrapper_SparseCsrCUDA_out_addmm_out(self, mat1, mat2, beta, alpha, out); |
1975 | } |
1976 | at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
1977 | return wrapper_SparseCsrCUDA_out_addmm_out(self, mat1, mat2, beta, alpha, out); |
1978 | } |
1979 | at::Tensor sparse_mask(const at::Tensor & self, const at::Tensor & mask) { |
1980 | return wrapper_SparseCsrCUDA__sparse_mask(self, mask); |
1981 | } |
1982 | at::Tensor _to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) { |
1983 | return wrapper_SparseCsrCUDA___to_dense(self, dtype); |
1984 | } |
1985 | int64_t sparse_dim(const at::Tensor & self) { |
1986 | return wrapper_SparseCsrCUDA__sparse_dim(self); |
1987 | } |
1988 | int64_t dense_dim(const at::Tensor & self) { |
1989 | return wrapper_SparseCsrCUDA__dense_dim(self); |
1990 | } |
1991 | int64_t _nnz(const at::Tensor & self) { |
1992 | return wrapper_SparseCsrCUDA___nnz(self); |
1993 | } |
1994 | at::Tensor values(const at::Tensor & self) { |
1995 | return wrapper_SparseCsrCUDA__values(self); |
1996 | } |
1997 | at::Tensor crow_indices(const at::Tensor & self) { |
1998 | return wrapper_SparseCsrCUDA__crow_indices(self); |
1999 | } |
2000 | at::Tensor col_indices(const at::Tensor & self) { |
2001 | return wrapper_SparseCsrCUDA__col_indices(self); |
2002 | } |
2003 | at::Tensor ccol_indices(const at::Tensor & self) { |
2004 | return wrapper_SparseCsrCUDA__ccol_indices(self); |
2005 | } |
2006 | at::Tensor row_indices(const at::Tensor & self) { |
2007 | return wrapper_SparseCsrCUDA__row_indices(self); |
2008 | } |
2009 | at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) { |
2010 | return wrapper_SparseCsrCUDA_sparse_dim_to_sparse(self, sparse_dim); |
2011 | } |
2012 | at::Tensor to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
2013 | return wrapper_SparseCsrCUDA__to_sparse(self, layout, blocksize, dense_dim); |
2014 | } |
2015 | at::Tensor to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) { |
2016 | return wrapper_SparseCsrCUDA__to_sparse_csr(self, dense_dim); |
2017 | } |
2018 | at::Tensor to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) { |
2019 | return wrapper_SparseCsrCUDA__to_sparse_csc(self, dense_dim); |
2020 | } |
2021 | at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
2022 | return wrapper_SparseCsrCUDA__to_sparse_bsr(self, blocksize, dense_dim); |
2023 | } |
2024 | at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) { |
2025 | return wrapper_SparseCsrCUDA__to_sparse_bsc(self, blocksize, dense_dim); |
2026 | } |
2027 | ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) { |
2028 | return wrapper_SparseCsrCUDA_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M); |
2029 | } |
2030 | ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) { |
2031 | return wrapper_SparseCsrCUDA_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M); |
2032 | } |
2033 | at::Tensor erfinv(const at::Tensor & self) { |
2034 | return wrapper_SparseCsrCUDA__erfinv(self); |
2035 | } |
2036 | at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) { |
2037 | return wrapper_SparseCsrCUDA_out_erfinv_out(self, out); |
2038 | } |
2039 | at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) { |
2040 | return wrapper_SparseCsrCUDA_out_erfinv_out(self, out); |
2041 | } |
2042 | at::Tensor & erfinv_(at::Tensor & self) { |
2043 | return wrapper_SparseCsrCUDA__erfinv_(self); |
2044 | } |
2045 | at::Tensor sign(const at::Tensor & self) { |
2046 | return wrapper_SparseCsrCUDA__sign(self); |
2047 | } |
2048 | at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) { |
2049 | return wrapper_SparseCsrCUDA_out_sign_out(self, out); |
2050 | } |
2051 | at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) { |
2052 | return wrapper_SparseCsrCUDA_out_sign_out(self, out); |
2053 | } |
2054 | at::Tensor & sign_(at::Tensor & self) { |
2055 | return wrapper_SparseCsrCUDA__sign_(self); |
2056 | } |
2057 | at::Tensor signbit(const at::Tensor & self) { |
2058 | return wrapper_SparseCsrCUDA__signbit(self); |
2059 | } |
2060 | at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) { |
2061 | return wrapper_SparseCsrCUDA_out_signbit_out(self, out); |
2062 | } |
2063 | at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) { |
2064 | return wrapper_SparseCsrCUDA_out_signbit_out(self, out); |
2065 | } |
2066 | at::Tensor & normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) { |
2067 | return wrapper_SparseCsrCUDA__normal_(self, mean, std, generator); |
2068 | } |
2069 | at::Tensor isinf(const at::Tensor & self) { |
2070 | return wrapper_SparseCsrCUDA__isinf(self); |
2071 | } |
2072 | at::Tensor isposinf(const at::Tensor & self) { |
2073 | return wrapper_SparseCsrCUDA__isposinf(self); |
2074 | } |
2075 | at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) { |
2076 | return wrapper_SparseCsrCUDA_out_isposinf_out(self, out); |
2077 | } |
2078 | at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) { |
2079 | return wrapper_SparseCsrCUDA_out_isposinf_out(self, out); |
2080 | } |
2081 | at::Tensor isneginf(const at::Tensor & self) { |
2082 | return wrapper_SparseCsrCUDA__isneginf(self); |
2083 | } |
2084 | at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) { |
2085 | return wrapper_SparseCsrCUDA_out_isneginf_out(self, out); |
2086 | } |
2087 | at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) { |
2088 | return wrapper_SparseCsrCUDA_out_isneginf_out(self, out); |
2089 | } |
2090 | at::Tensor select_copy(const at::Tensor & self, int64_t dim, int64_t index) { |
2091 | return wrapper_SparseCsrCUDA_int_select_copy(self, dim, index); |
2092 | } |
2093 | at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) { |
2094 | return wrapper_SparseCsrCUDA_int_select_copy(self, dim, index); |
2095 | } |
2096 | } // namespace sparsecsrcuda |
2097 | } // namespace at |
2098 | |