1 | // required for old g++ to compile PRId64 macros, see |
2 | // https://github.com/pytorch/pytorch/issues/3571 |
3 | // for context |
4 | #ifndef __STDC_FORMAT_MACROS |
5 | #define __STDC_FORMAT_MACROS |
6 | #endif |
7 | |
8 | // an external backend might generate file within its code tree |
9 | // and check all the source files within the tree with clang-format. |
10 | // so, disable it since the backend might have a different config. |
11 | // clang-format off |
12 | |
13 | // NOTE: This condition is true for all PyTorch internal libraries, it |
14 | // just excludes external projects such as torch_xla which |
15 | // re-use some of the PyTorch codegen machinery. |
16 | #if defined(CAFFE2_BUILD_MAIN_LIB) || \ |
17 | defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ |
18 | defined(TORCH_HIP_BUILD_MAIN_LIB) || \ |
19 | defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ |
20 | defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) |
21 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
22 | #endif |
23 | |
24 | // @generated by torchgen/gen.py from RegisterDispatchKey.cpp |
25 | |
26 | #include <c10/core/TensorImpl.h> |
27 | #include <c10/core/Allocator.h> |
28 | #include <ATen/DeviceGuard.h> |
29 | #include <ATen/NamedTensorUtils.h> |
30 | #include <ATen/Utils.h> |
31 | #include <ATen/WrapDimUtils.h> |
32 | #include <ATen/Dispatch.h> |
33 | #include <c10/util/ExclusivelyOwned.h> |
34 | #include <c10/util/Half.h> |
35 | #include <c10/core/UndefinedTensorImpl.h> |
36 | #include <c10/util/Optional.h> |
37 | #include <ATen/Tensor.h> |
38 | #include <ATen/native/Resize.h> |
39 | |
40 | #include <cstddef> |
41 | #include <functional> |
42 | #include <memory> |
43 | #include <utility> |
44 | |
45 | #include <ATen/Config.h> |
46 | #include <ATen/core/op_registration/adaption.h> |
47 | #include <torch/library.h> |
48 | |
49 | |
50 | #include <ATen/ops/as_strided_native.h> |
51 | #include <ATen/ops/empty.h> |
52 | #include <ATen/ops/empty_strided.h> |
53 | #include <ATen/ops/_copy_from_and_resize.h> |
54 | #include <ATen/ops/_copy_from.h> |
55 | #include <ATen/ops/_coalesced_native.h> |
56 | #include <ATen/ops/_dimV_native.h> |
57 | #include <ATen/ops/_indices_native.h> |
58 | #include <ATen/ops/_nnz_native.h> |
59 | #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h> |
60 | #include <ATen/ops/_sparse_coo_tensor_with_dims_native.h> |
61 | #include <ATen/ops/_values_native.h> |
62 | #include <ATen/ops/dense_dim_native.h> |
63 | #include <ATen/ops/empty_like_native.h> |
64 | #include <ATen/ops/empty_native.h> |
65 | #include <ATen/ops/indices_native.h> |
66 | #include <ATen/ops/is_coalesced_native.h> |
67 | #include <ATen/ops/isinf_native.h> |
68 | #include <ATen/ops/sparse_dim_native.h> |
69 | #include <ATen/ops/sparse_resize_and_clear_native.h> |
70 | #include <ATen/ops/sparse_resize_native.h> |
71 | #include <ATen/ops/values_native.h> |
72 | #include <ATen/ops/zero_native.h> |
73 | #include <ATen/ops/zeros_native.h> |
74 | |
75 | // See template file RegisterDispatchDefinitions.ini |
76 | namespace at { |
77 | // NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid |
78 | // ambiguity with conflicting identifiers that may have been defined in |
79 | // at namespace already. |
80 | namespace { |
81 | void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) { |
82 | TORCH_CHECK(options.dtype() == out.dtype(), |
83 | "Expected out tensor to have dtype " , options.dtype(), ", but got " , out.dtype(), " instead" ); |
84 | TORCH_CHECK(options.device() == out.device(), |
85 | "Expected out tensor to have device " , options.device(), ", but got " , out.device(), " instead" ); |
86 | const bool resized = at::native::resize_output(out, sizes); |
87 | // Only restride if a resize occurred; otherwise we ignore the (advisory) |
88 | // strides from the meta function and directly use the output tensor's |
89 | // preexisting strides |
90 | if (resized) { |
91 | if (!strides.empty()) { |
92 | TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value()); |
93 | // TODO: avoid the redispatch here |
94 | out.as_strided_(sizes, strides); |
95 | } else if (options.memory_format_opt().has_value()) { |
96 | out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt()); |
97 | } |
98 | } |
99 | } |
100 | void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) { |
101 | // These checks are needed on those operators that: |
102 | // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm') |
103 | // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod') |
104 | // For other operators (e.g. 'add'), 'TensorIterator' already checks |
105 | // these things separately. |
106 | TORCH_CHECK(options.dtype() == self.dtype(), |
107 | "Bad in-place call: " , |
108 | "input tensor dtype " , self.dtype(), " and output tensor dtype " , options.dtype(), " should match" ); |
109 | TORCH_CHECK(options.device() == self.device(), |
110 | "Bad in-place call: " , |
111 | "input tensor device " , self.device(), " and output tensor device " , options.device(), " should match" ); |
112 | TORCH_CHECK(sizes == self.sizes(), |
113 | "Bad in-place call: " , |
114 | "input tensor size " , self.sizes(), " and output tensor size " , sizes, " should match" ); |
115 | } |
116 | namespace { |
117 | at::Tensor wrapper_SparseMeta_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
118 | // No device check |
119 | // DeviceGuard omitted |
120 | return at::native::empty_sparse(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format); |
121 | } |
122 | } // anonymous namespace |
123 | namespace { |
124 | at::Tensor wrapper_SparseMeta__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
125 | // No device check |
126 | // DeviceGuard omitted |
127 | return at::native::empty_like_sparse_coo(self, dtype, layout, device, pin_memory, memory_format); |
128 | } |
129 | } // anonymous namespace |
130 | namespace { |
131 | at::Tensor & wrapper_SparseMeta_out_zeros_out(c10::SymIntArrayRef size, at::Tensor & out) { |
132 | // No device check |
133 | // DeviceGuard omitted |
134 | return at::native::zeros_sparse_out(C10_AS_INTARRAYREF_SLOW(size), out); |
135 | } |
136 | } // anonymous namespace |
137 | namespace { |
138 | at::Tensor & wrapper_SparseMeta__zero_(at::Tensor & self) { |
139 | // No device check |
140 | // DeviceGuard omitted |
141 | return at::native::zero_sparse_(self); |
142 | } |
143 | } // anonymous namespace |
144 | namespace { |
145 | at::Tensor wrapper_SparseMeta___sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
146 | // No device check |
147 | // DeviceGuard omitted |
148 | return at::native::new_with_dims_sparse(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory); |
149 | } |
150 | } // anonymous namespace |
151 | namespace { |
152 | at::Tensor wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
153 | // No device check |
154 | // DeviceGuard omitted |
155 | return at::native::new_with_dims_and_tensor_sparse_symint(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory); |
156 | } |
157 | } // anonymous namespace |
158 | namespace { |
159 | const at::Tensor & wrapper_SparseMeta__sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { |
160 | // No device check |
161 | // DeviceGuard omitted |
162 | return at::native::sparse_resize_(self, size, sparse_dim, dense_dim); |
163 | } |
164 | } // anonymous namespace |
165 | namespace { |
166 | const at::Tensor & wrapper_SparseMeta__sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { |
167 | // No device check |
168 | // DeviceGuard omitted |
169 | return at::native::sparse_resize_and_clear_(self, size, sparse_dim, dense_dim); |
170 | } |
171 | } // anonymous namespace |
172 | namespace { |
173 | int64_t wrapper_SparseMeta__sparse_dim(const at::Tensor & self) { |
174 | // No device check |
175 | // DeviceGuard omitted |
176 | return at::native::sparse_dim_sparse(self); |
177 | } |
178 | } // anonymous namespace |
179 | namespace { |
180 | int64_t wrapper_SparseMeta__dense_dim(const at::Tensor & self) { |
181 | // No device check |
182 | // DeviceGuard omitted |
183 | return at::native::dense_dim_sparse(self); |
184 | } |
185 | } // anonymous namespace |
186 | namespace { |
187 | int64_t wrapper_SparseMeta___dimV(const at::Tensor & self) { |
188 | // No device check |
189 | // DeviceGuard omitted |
190 | return at::native::dense_dim_sparse(self); |
191 | } |
192 | } // anonymous namespace |
193 | namespace { |
194 | int64_t wrapper_SparseMeta___nnz(const at::Tensor & self) { |
195 | // No device check |
196 | // DeviceGuard omitted |
197 | return at::native::_nnz_sparse(self); |
198 | } |
199 | } // anonymous namespace |
200 | namespace { |
201 | bool wrapper_SparseMeta__is_coalesced(const at::Tensor & self) { |
202 | // No device check |
203 | // DeviceGuard omitted |
204 | return at::native::is_coalesced_sparse(self); |
205 | } |
206 | } // anonymous namespace |
207 | namespace { |
208 | at::Tensor wrapper_SparseMeta___indices(const at::Tensor & self) { |
209 | // No device check |
210 | // DeviceGuard omitted |
211 | return at::native::_indices_sparse(self); |
212 | } |
213 | } // anonymous namespace |
214 | namespace { |
215 | at::Tensor wrapper_SparseMeta___values(const at::Tensor & self) { |
216 | // No device check |
217 | // DeviceGuard omitted |
218 | return at::native::_values_sparse(self); |
219 | } |
220 | } // anonymous namespace |
221 | namespace { |
222 | at::Tensor & wrapper_SparseMeta___coalesced_(at::Tensor & self, bool coalesced) { |
223 | // No device check |
224 | // DeviceGuard omitted |
225 | return at::native::_coalesced_sparse_(self, coalesced); |
226 | } |
227 | } // anonymous namespace |
228 | namespace { |
229 | at::Tensor wrapper_SparseMeta__indices(const at::Tensor & self) { |
230 | // No device check |
231 | // DeviceGuard omitted |
232 | return at::native::indices_sparse(self); |
233 | } |
234 | } // anonymous namespace |
235 | namespace { |
236 | at::Tensor wrapper_SparseMeta__values(const at::Tensor & self) { |
237 | // No device check |
238 | // DeviceGuard omitted |
239 | return at::native::values_sparse(self); |
240 | } |
241 | } // anonymous namespace |
242 | namespace { |
243 | at::Tensor wrapper_SparseMeta__isinf(const at::Tensor & self) { |
244 | // No device check |
245 | // DeviceGuard omitted |
246 | return at::native::isinf_sparse_meta(self); |
247 | } |
248 | } // anonymous namespace |
// Dispatcher registration: binds each SparseMeta kernel defined above to its
// aten operator under the SparseMeta dispatch key. The schema name strings
// must match the operator names declared in native_functions.yaml exactly;
// registration order has no semantic effect.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
    // Factory / fill operators
    m.impl("empty.memory_format",
    TORCH_FN(wrapper_SparseMeta_memory_format_empty));
    m.impl("empty_like",
    TORCH_FN(wrapper_SparseMeta__empty_like));
    m.impl("zeros.out",
    TORCH_FN(wrapper_SparseMeta_out_zeros_out));
    m.impl("zero_",
    TORCH_FN(wrapper_SparseMeta__zero_));
    // Sparse COO construction and resizing
    m.impl("_sparse_coo_tensor_with_dims",
    TORCH_FN(wrapper_SparseMeta___sparse_coo_tensor_with_dims));
    m.impl("_sparse_coo_tensor_with_dims_and_tensors",
    TORCH_FN(wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors));
    m.impl("sparse_resize_",
    TORCH_FN(wrapper_SparseMeta__sparse_resize_));
    m.impl("sparse_resize_and_clear_",
    TORCH_FN(wrapper_SparseMeta__sparse_resize_and_clear_));
    // Sparse metadata accessors
    m.impl("sparse_dim",
    TORCH_FN(wrapper_SparseMeta__sparse_dim));
    m.impl("dense_dim",
    TORCH_FN(wrapper_SparseMeta__dense_dim));
    m.impl("_dimV",
    TORCH_FN(wrapper_SparseMeta___dimV));
    m.impl("_nnz",
    TORCH_FN(wrapper_SparseMeta___nnz));
    m.impl("is_coalesced",
    TORCH_FN(wrapper_SparseMeta__is_coalesced));
    m.impl("_indices",
    TORCH_FN(wrapper_SparseMeta___indices));
    m.impl("_values",
    TORCH_FN(wrapper_SparseMeta___values));
    m.impl("_coalesced_",
    TORCH_FN(wrapper_SparseMeta___coalesced_));
    m.impl("indices",
    TORCH_FN(wrapper_SparseMeta__indices));
    m.impl("values",
    TORCH_FN(wrapper_SparseMeta__values));
    // Pointwise
    m.impl("isinf",
    TORCH_FN(wrapper_SparseMeta__isinf));
};
289 | } // anonymous namespace |
// Backend-specific C++ entry points for the SparseMeta kernels above. These
// call the wrapper_SparseMeta_* functions directly, bypassing the dispatcher.
// For most operators codegen emits overload pairs: a TensorOptions-based
// convenience form (which unpacks options into dtype/layout/device/pin_memory
// via optTypeMetaToScalarType / check_tensor_options_and_extract_memory_format)
// and the fully unpacked form; *_symint variants accept SymInt sizes, while
// the IntArrayRef forms convert through c10::fromIntArrayRefSlow.
namespace sparsemeta {
// aten::empty.memory_format
at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseMeta_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseMeta_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
}
at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseMeta_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseMeta_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
}
// aten::empty_like
at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseMeta__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseMeta__empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
// aten::zeros.out — `_out` takes `out` first, `_outf` takes it last.
at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) {
return wrapper_SparseMeta_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) {
return wrapper_SparseMeta_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
return wrapper_SparseMeta_out_zeros_out(size, out);
}
at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
return wrapper_SparseMeta_out_zeros_out(size, out);
}
// aten::zero_
at::Tensor & zero_(at::Tensor & self) {
return wrapper_SparseMeta__zero_(self);
}
// aten::_sparse_coo_tensor_with_dims
at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_SparseMeta___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_SparseMeta___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}
// aten::_sparse_coo_tensor_with_dims_and_tensors
at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
return wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
return wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
}
// aten::sparse_resize_ / aten::sparse_resize_and_clear_
const at::Tensor & sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
return wrapper_SparseMeta__sparse_resize_(self, size, sparse_dim, dense_dim);
}
const at::Tensor & sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
return wrapper_SparseMeta__sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
}
// Sparse metadata accessors
int64_t sparse_dim(const at::Tensor & self) {
return wrapper_SparseMeta__sparse_dim(self);
}
int64_t dense_dim(const at::Tensor & self) {
return wrapper_SparseMeta__dense_dim(self);
}
int64_t _dimV(const at::Tensor & self) {
return wrapper_SparseMeta___dimV(self);
}
int64_t _nnz(const at::Tensor & self) {
return wrapper_SparseMeta___nnz(self);
}
bool is_coalesced(const at::Tensor & self) {
return wrapper_SparseMeta__is_coalesced(self);
}
at::Tensor _indices(const at::Tensor & self) {
return wrapper_SparseMeta___indices(self);
}
at::Tensor _values(const at::Tensor & self) {
return wrapper_SparseMeta___values(self);
}
at::Tensor & _coalesced_(at::Tensor & self, bool coalesced) {
return wrapper_SparseMeta___coalesced_(self, coalesced);
}
at::Tensor indices(const at::Tensor & self) {
return wrapper_SparseMeta__indices(self);
}
at::Tensor values(const at::Tensor & self) {
return wrapper_SparseMeta__values(self);
}
// aten::isinf
at::Tensor isinf(const at::Tensor & self) {
return wrapper_SparseMeta__isinf(self);
}
} // namespace sparsemeta
382 | } // namespace at |
383 | |