// Required for old versions of g++ to compile the PRId64 macros; see
// https://github.com/pytorch/pytorch/issues/3571 for context.
4 | #ifndef __STDC_FORMAT_MACROS |
5 | #define __STDC_FORMAT_MACROS |
6 | #endif |
7 | |
// An external backend might generate this file within its own code tree
// and check all the source files in that tree with clang-format.
// Disable clang-format here, since the backend might use a different config.
11 | // clang-format off |
12 | |
// NOTE: This condition is true for all PyTorch internal libraries; it
// only excludes external projects such as torch_xla that re-use some of
// the PyTorch codegen machinery.
16 | #if defined(CAFFE2_BUILD_MAIN_LIB) || \ |
17 | defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ |
18 | defined(TORCH_HIP_BUILD_MAIN_LIB) || \ |
19 | defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ |
20 | defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) |
21 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
22 | #endif |
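// When TORCH_ASSERT_ONLY_METHOD_OPERATORS is defined, ATen's umbrella headers
// (such as ATen/Functions.h) fail with a compile-time error, which forces this
// translation unit to rely on the fine-grained per-operator includes below.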
23 | |
24 | // @generated by torchgen/gen.py from RegisterDispatchKey.cpp |
25 | |
26 | #include <c10/core/TensorImpl.h> |
27 | #include <c10/core/Allocator.h> |
28 | #include <ATen/DeviceGuard.h> |
29 | #include <ATen/NamedTensorUtils.h> |
30 | #include <ATen/Utils.h> |
31 | #include <ATen/WrapDimUtils.h> |
32 | #include <ATen/Dispatch.h> |
33 | #include <c10/util/ExclusivelyOwned.h> |
34 | #include <c10/util/Half.h> |
35 | #include <c10/core/UndefinedTensorImpl.h> |
36 | #include <c10/util/Optional.h> |
37 | #include <ATen/Tensor.h> |
38 | #include <ATen/native/Resize.h> |
39 | |
40 | #include <cstddef> |
41 | #include <functional> |
42 | #include <memory> |
43 | #include <utility> |
44 | |
45 | #include <ATen/Config.h> |
46 | #include <ATen/core/op_registration/adaption.h> |
47 | #include <torch/library.h> |
48 | |
49 | |
50 | #include <ATen/ops/as_strided_native.h> |
51 | #include <ATen/ops/empty.h> |
52 | #include <ATen/ops/empty_strided.h> |
53 | #include <ATen/ops/_copy_from_and_resize.h> |
54 | #include <ATen/ops/_copy_from.h> |
55 | #include <ATen/ops/_adaptive_avg_pool2d_native.h> |
56 | #include <ATen/ops/_adaptive_avg_pool3d_native.h> |
57 | #include <ATen/ops/_empty_affine_quantized_native.h> |
58 | #include <ATen/ops/_empty_per_channel_affine_quantized_native.h> |
59 | #include <ATen/ops/_index_put_impl_native.h> |
60 | #include <ATen/ops/_prelu_kernel_native.h> |
61 | #include <ATen/ops/_reshape_alias_native.h> |
62 | #include <ATen/ops/_upsample_nearest_exact2d_native.h> |
63 | #include <ATen/ops/_upsample_nearest_exact3d_native.h> |
64 | #include <ATen/ops/adaptive_avg_pool3d_native.h> |
66 | #include <ATen/ops/avg_pool2d_native.h> |
67 | #include <ATen/ops/avg_pool3d_native.h> |
68 | #include <ATen/ops/cat_native.h> |
69 | #include <ATen/ops/channel_shuffle_native.h> |
70 | #include <ATen/ops/clamp_native.h> |
71 | #include <ATen/ops/clone_native.h> |
72 | #include <ATen/ops/dequantize_native.h> |
73 | #include <ATen/ops/empty_like_native.h> |
74 | #include <ATen/ops/empty_native.h> |
75 | #include <ATen/ops/empty_quantized_native.h> |
76 | #include <ATen/ops/empty_strided_native.h> |
77 | #include <ATen/ops/eq_native.h> |
78 | #include <ATen/ops/equal_native.h> |
79 | #include <ATen/ops/fill_native.h> |
80 | #include <ATen/ops/flip_native.h> |
81 | #include <ATen/ops/ge_native.h> |
82 | #include <ATen/ops/gelu_native.h> |
83 | #include <ATen/ops/grid_sampler_2d_native.h> |
84 | #include <ATen/ops/gt_native.h> |
85 | #include <ATen/ops/hardsigmoid_native.h> |
86 | #include <ATen/ops/hardtanh_native.h> |
87 | #include <ATen/ops/index_native.h> |
88 | #include <ATen/ops/index_select_native.h> |
89 | #include <ATen/ops/int_repr_native.h> |
90 | #include <ATen/ops/le_native.h> |
91 | #include <ATen/ops/leaky_relu_native.h> |
92 | #include <ATen/ops/lt_native.h> |
93 | #include <ATen/ops/masked_fill_native.h> |
94 | #include <ATen/ops/max_native.h> |
95 | #include <ATen/ops/mean_native.h> |
96 | #include <ATen/ops/min_native.h> |
97 | #include <ATen/ops/ne_native.h> |
98 | #include <ATen/ops/q_per_channel_axis_native.h> |
99 | #include <ATen/ops/q_per_channel_scales_native.h> |
100 | #include <ATen/ops/q_per_channel_zero_points_native.h> |
101 | #include <ATen/ops/q_scale_native.h> |
102 | #include <ATen/ops/q_zero_point_native.h> |
103 | #include <ATen/ops/qscheme_native.h> |
104 | #include <ATen/ops/quantized_batch_norm_native.h> |
105 | #include <ATen/ops/quantized_max_pool1d_native.h> |
106 | #include <ATen/ops/quantized_max_pool2d_native.h> |
107 | #include <ATen/ops/reflection_pad1d_native.h> |
108 | #include <ATen/ops/reflection_pad2d_native.h> |
109 | #include <ATen/ops/relu_native.h> |
110 | #include <ATen/ops/resize_native.h> |
111 | #include <ATen/ops/set_native.h> |
112 | #include <ATen/ops/sigmoid_native.h> |
113 | #include <ATen/ops/sort_native.h> |
114 | #include <ATen/ops/squeeze_native.h> |
115 | #include <ATen/ops/std_native.h> |
116 | #include <ATen/ops/tanh_native.h> |
117 | #include <ATen/ops/threshold_native.h> |
118 | #include <ATen/ops/topk_native.h> |
119 | #include <ATen/ops/unfold_native.h> |
120 | #include <ATen/ops/unsqueeze_native.h> |
121 | #include <ATen/ops/upsample_bilinear2d_native.h> |
122 | #include <ATen/ops/upsample_nearest2d_native.h> |
123 | #include <ATen/ops/upsample_nearest3d_native.h> |
124 | #include <ATen/ops/view_native.h> |
125 | |
126 | // See template file RegisterDispatchDefinitions.ini |
127 | namespace at { |
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
131 | namespace { |
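// create_out: allocate a fresh output tensor for a functional op. An empty
// `strides` means "use the default contiguous layout"; otherwise the tensor
// is allocated with exactly the requested strides.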
132 | Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) { |
133 | if (strides.empty()) { |
134 | return at::empty(sizes, options); |
135 | } else { |
136 | return at::empty_strided(sizes, strides, options); |
137 | } |
138 | } |
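// resize_out: validate a user-supplied `out=` tensor (its dtype and device
// must match what the meta function computed) and resize it to `sizes`,
// restriding only when an actual resize took place.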
139 | void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) { |
140 | TORCH_CHECK(options.dtype() == out.dtype(), |
141 | "Expected out tensor to have dtype " , options.dtype(), ", but got " , out.dtype(), " instead" ); |
142 | TORCH_CHECK(options.device() == out.device(), |
143 | "Expected out tensor to have device " , options.device(), ", but got " , out.device(), " instead" ); |
144 | const bool resized = at::native::resize_output(out, sizes); |
145 | // Only restride if a resize occurred; otherwise we ignore the (advisory) |
146 | // strides from the meta function and directly use the output tensor's |
147 | // preexisting strides |
148 | if (resized) { |
149 | if (!strides.empty()) { |
150 | TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value()); |
151 | // TODO: avoid the redispatch here |
152 | out.as_strided_(sizes, strides); |
153 | } else if (options.memory_format_opt().has_value()) { |
154 | out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt()); |
155 | } |
156 | } |
157 | } |
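// check_inplace: validate that the `self` tensor of an in-place call already
// has the dtype, device, and sizes the operator would otherwise produce.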
158 | void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) { |
159 | // These checks are needed on those operators that: |
160 | // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm') |
161 | // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod') |
162 | // For other operators (e.g. 'add'), 'TensorIterator' already checks |
163 | // these things separately. |
164 | TORCH_CHECK(options.dtype() == self.dtype(), |
165 | "Bad in-place call: " , |
166 | "input tensor dtype " , self.dtype(), " and output tensor dtype " , options.dtype(), " should match" ); |
167 | TORCH_CHECK(options.device() == self.device(), |
168 | "Bad in-place call: " , |
169 | "input tensor device " , self.device(), " and output tensor device " , options.device(), " should match" ); |
170 | TORCH_CHECK(sizes == self.sizes(), |
171 | "Bad in-place call: " , |
172 | "input tensor size " , self.sizes(), " and output tensor size " , sizes, " should match" ); |
173 | } |
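// maybe_create_proxy: if `out` does not already have the requested strides,
// allocate a correctly-strided temporary for the kernel to write into;
// returns nullopt when `out` can be used directly.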
174 | c10::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) { |
175 | if (out.strides() != strides) { |
176 | return at::empty_strided(sizes, strides, options); |
177 | } |
178 | return c10::nullopt; |
179 | } |
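// Each wrapper below simply forwards to the at::native quantized CPU kernel
// registered for the corresponding operator in native_functions.yaml,
// converting SymInt arguments to concrete ints where the kernel expects them
// (C10_AS_INTARRAYREF_SLOW / expect_int).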
180 | namespace { |
181 | at::Tensor wrapper_QuantizedCPU__as_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) { |
182 | // No device check |
183 | // DeviceGuard omitted |
184 | return at::native::as_strided_qtensorimpl(self, C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride), storage_offset.has_value() ? c10::make_optional(storage_offset->expect_int()) : c10::nullopt); |
185 | } |
186 | } // anonymous namespace |
187 | namespace { |
188 | at::Tensor wrapper_QuantizedCPU__quantized_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) { |
189 | // No device check |
190 | // DeviceGuard omitted |
191 | return at::native::quantized_batch_norm(input, weight, bias, mean, var, eps, output_scale, output_zero_point); |
192 | } |
193 | } // anonymous namespace |
194 | namespace { |
195 | at::Tensor wrapper_QuantizedCPU__cat(const at::ITensorListRef & tensors, int64_t dim) { |
196 | // No device check |
197 | // DeviceGuard omitted |
198 | return at::native::cat_quantized_cpu(tensors, dim); |
199 | } |
200 | } // anonymous namespace |
201 | namespace { |
202 | at::Tensor & wrapper_QuantizedCPU_out_cat_out(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) { |
203 | // No device check |
204 | // DeviceGuard omitted |
205 | return at::native::cat_out_quantized_cpu(tensors, dim, out); |
206 | } |
207 | } // anonymous namespace |
208 | namespace { |
209 | at::Tensor wrapper_QuantizedCPU__clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) { |
210 | // No device check |
211 | // DeviceGuard omitted |
212 | return at::native::clamp_quantized_cpu(self, min, max); |
213 | } |
214 | } // anonymous namespace |
215 | namespace { |
216 | at::Tensor wrapper_QuantizedCPU_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
217 | // No device check |
218 | // DeviceGuard omitted |
219 | return at::native::empty_unknown_quantized(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format); |
220 | } |
221 | } // anonymous namespace |
222 | namespace { |
223 | at::Tensor wrapper_QuantizedCPU___empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) { |
224 | // No device check |
225 | // DeviceGuard omitted |
226 | return at::native::empty_affine_quantized(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format); |
227 | } |
228 | } // anonymous namespace |
229 | namespace { |
230 | at::Tensor wrapper_QuantizedCPU___empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
231 | // No device check |
232 | // DeviceGuard omitted |
233 | return at::native::empty_per_channel_affine_quantized(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format); |
234 | } |
235 | } // anonymous namespace |
236 | namespace { |
237 | const at::Tensor & wrapper_QuantizedCPU__resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) { |
238 | // No device check |
239 | // DeviceGuard omitted |
240 | return at::native::quantized_resize_cpu_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format); |
241 | } |
242 | } // anonymous namespace |
243 | namespace { |
244 | at::Tensor wrapper_QuantizedCPU__empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
245 | // No device check |
246 | // DeviceGuard omitted |
247 | return at::native::empty_quantized(size, qtensor, dtype, layout, device, pin_memory, memory_format); |
248 | } |
249 | } // anonymous namespace |
250 | namespace { |
251 | at::Tensor wrapper_QuantizedCPU__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
252 | // No device check |
253 | // DeviceGuard omitted |
254 | return at::native::empty_like_quantized(self, dtype, layout, device, pin_memory, memory_format); |
255 | } |
256 | } // anonymous namespace |
257 | namespace { |
258 | at::Tensor wrapper_QuantizedCPU__empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
259 | // No device check |
260 | // DeviceGuard omitted |
261 | return at::native::empty_strided_unknown_quantized(C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride), dtype, layout, device, pin_memory); |
262 | } |
263 | } // anonymous namespace |
264 | namespace { |
265 | at::Tensor & wrapper_QuantizedCPU_Scalar_fill_(at::Tensor & self, const at::Scalar & value) { |
266 | // No device check |
267 | // DeviceGuard omitted |
268 | return at::native::fill_quantized_(self, value); |
269 | } |
270 | } // anonymous namespace |
271 | namespace { |
272 | at::Tensor & wrapper_QuantizedCPU_Tensor_fill_(at::Tensor & self, const at::Tensor & value) { |
273 | // No device check |
274 | // DeviceGuard omitted |
275 | return at::native::fill_quantized_(self, value); |
276 | } |
277 | } // anonymous namespace |
278 | namespace { |
279 | at::Tensor wrapper_QuantizedCPU__grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { |
280 | // No device check |
281 | // DeviceGuard omitted |
282 | return at::native::grid_sampler_2d_cpu(input, grid, interpolation_mode, padding_mode, align_corners); |
283 | } |
284 | } // anonymous namespace |
285 | namespace { |
286 | at::Tensor wrapper_QuantizedCPU_Tensor_index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) { |
287 | // No device check |
288 | // DeviceGuard omitted |
289 | return at::native::quantized_index(self, indices); |
290 | } |
291 | } // anonymous namespace |
292 | namespace { |
293 | at::Tensor & wrapper_QuantizedCPU___index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) { |
294 | // No device check |
295 | // DeviceGuard omitted |
296 | return at::native::_index_put_impl_quantized_cpu_(self, indices, values, accumulate, unsafe); |
297 | } |
298 | } // anonymous namespace |
299 | namespace { |
300 | ::std::tuple<at::Tensor,at::Tensor> wrapper_QuantizedCPU_dim_max(const at::Tensor & self, int64_t dim, bool keepdim) { |
301 | // No device check |
302 | // DeviceGuard omitted |
303 | return at::native::qmax(self, dim, keepdim); |
304 | } |
305 | } // anonymous namespace |
306 | namespace { |
307 | at::Tensor wrapper_QuantizedCPU__quantized_max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { |
308 | // No device check |
309 | // DeviceGuard omitted |
310 | return at::native::quantized_max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode); |
311 | } |
312 | } // anonymous namespace |
313 | namespace { |
314 | at::Tensor wrapper_QuantizedCPU__quantized_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { |
315 | // No device check |
316 | // DeviceGuard omitted |
317 | return at::native::quantized_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode); |
318 | } |
319 | } // anonymous namespace |
320 | namespace { |
321 | at::Tensor wrapper_QuantizedCPU_dim_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
322 | // No device check |
323 | // DeviceGuard omitted |
324 | return at::native::mean_quantized_cpu(self, dim, keepdim, dtype); |
325 | } |
326 | } // anonymous namespace |
327 | namespace { |
328 | at::Tensor & wrapper_QuantizedCPU_out_mean_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
329 | // No device check |
330 | // DeviceGuard omitted |
331 | return at::native::mean_out_quantized_cpu(self, dim, keepdim, dtype, out); |
332 | } |
333 | } // anonymous namespace |
334 | namespace { |
335 | ::std::tuple<at::Tensor,at::Tensor> wrapper_QuantizedCPU_dim_min(const at::Tensor & self, int64_t dim, bool keepdim) { |
336 | // No device check |
337 | // DeviceGuard omitted |
338 | return at::native::qmin(self, dim, keepdim); |
339 | } |
340 | } // anonymous namespace |
341 | namespace { |
342 | at::Tensor wrapper_QuantizedCPU__channel_shuffle(const at::Tensor & self, int64_t groups) { |
343 | // No device check |
344 | // DeviceGuard omitted |
345 | return at::native::channel_shuffle_quantized_cpu(self, groups); |
346 | } |
347 | } // anonymous namespace |
348 | namespace { |
349 | at::Tensor wrapper_QuantizedCPU___reshape_alias(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { |
350 | // No device check |
351 | // DeviceGuard omitted |
352 | return at::native::_reshape_alias(self, C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride)); |
353 | } |
354 | } // anonymous namespace |
355 | namespace { |
356 | at::Tensor wrapper_QuantizedCPU__relu(const at::Tensor & self) { |
357 | // No device check |
358 | // DeviceGuard omitted |
359 | return at::native::relu_quantized_cpu(self); |
360 | } |
361 | } // anonymous namespace |
362 | namespace { |
363 | at::Tensor & wrapper_QuantizedCPU__relu_(at::Tensor & self) { |
364 | // No device check |
365 | // DeviceGuard omitted |
366 | return at::native::relu_quantized_cpu_(self); |
367 | } |
368 | } // anonymous namespace |
369 | namespace { |
370 | at::Tensor wrapper_QuantizedCPU___prelu_kernel(const at::Tensor & self, const at::Tensor & weight) { |
371 | // No device check |
372 | // DeviceGuard omitted |
373 | return at::native::_prelu_kernel_quantized_cpu(self, weight); |
374 | } |
375 | } // anonymous namespace |
376 | namespace { |
377 | at::Tensor wrapper_QuantizedCPU__gelu(const at::Tensor & self, c10::string_view approximate) { |
378 | // No device check |
379 | // DeviceGuard omitted |
380 | return at::native::gelu_quantized_cpu(self, approximate); |
381 | } |
382 | } // anonymous namespace |
383 | namespace { |
384 | at::Tensor wrapper_QuantizedCPU__sigmoid(const at::Tensor & self) { |
385 | // No device check |
386 | // DeviceGuard omitted |
387 | return at::native::sigmoid_quantized_cpu(self); |
388 | } |
389 | } // anonymous namespace |
390 | namespace { |
391 | at::Tensor wrapper_QuantizedCPU__squeeze(const at::Tensor & self) { |
392 | // No device check |
393 | // DeviceGuard omitted |
394 | return at::native::squeeze_quantized(self); |
395 | } |
396 | } // anonymous namespace |
397 | namespace { |
398 | at::Tensor wrapper_QuantizedCPU_dim_squeeze(const at::Tensor & self, int64_t dim) { |
399 | // No device check |
400 | // DeviceGuard omitted |
401 | return at::native::squeeze_quantized(self, dim); |
402 | } |
403 | } // anonymous namespace |
404 | namespace { |
405 | at::Tensor wrapper_QuantizedCPU_dims_squeeze(const at::Tensor & self, at::IntArrayRef dim) { |
406 | // No device check |
407 | // DeviceGuard omitted |
408 | return at::native::squeeze_quantized(self, dim); |
409 | } |
410 | } // anonymous namespace |
411 | namespace { |
412 | at::Tensor wrapper_QuantizedCPU_correction_std(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) { |
413 | // No device check |
414 | // DeviceGuard omitted |
415 | return at::native::std_quantized_cpu(self, dim, correction, keepdim); |
416 | } |
417 | } // anonymous namespace |
418 | namespace { |
419 | at::Tensor & wrapper_QuantizedCPU_correction_out_std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) { |
420 | // No device check |
421 | // DeviceGuard omitted |
422 | return at::native::std_out_quantized_cpu(self, dim, correction, keepdim, out); |
423 | } |
424 | } // anonymous namespace |
425 | namespace { |
426 | at::Tensor wrapper_QuantizedCPU__tanh(const at::Tensor & self) { |
427 | // No device check |
428 | // DeviceGuard omitted |
429 | return at::native::tanh_quantized_cpu(self); |
430 | } |
431 | } // anonymous namespace |
432 | namespace { |
433 | at::Tensor wrapper_QuantizedCPU__threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { |
434 | // No device check |
435 | // DeviceGuard omitted |
436 | return at::native::threshold_quantized_cpu(self, threshold, value); |
437 | } |
438 | } // anonymous namespace |
439 | namespace { |
440 | at::Tensor wrapper_QuantizedCPU__flip(const at::Tensor & self, at::IntArrayRef dims) { |
441 | // No device check |
442 | // DeviceGuard omitted |
443 | return at::native::flip(self, dims); |
444 | } |
445 | } // anonymous namespace |
446 | namespace { |
447 | at::Tensor wrapper_QuantizedCPU__unsqueeze(const at::Tensor & self, int64_t dim) { |
448 | // No device check |
449 | // DeviceGuard omitted |
450 | return at::native::unsqueeze_quantized(self, dim); |
451 | } |
452 | } // anonymous namespace |
453 | namespace { |
454 | at::Tensor wrapper_QuantizedCPU__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) { |
455 | // No device check |
456 | // DeviceGuard omitted |
457 | return at::native::quantized_clone(self, memory_format); |
458 | } |
459 | } // anonymous namespace |
460 | namespace { |
461 | at::Tensor wrapper_QuantizedCPU_self_dequantize(const at::Tensor & self) { |
462 | // No device check |
463 | // DeviceGuard omitted |
464 | return at::native::dequantize_quantized(self); |
465 | } |
466 | } // anonymous namespace |
467 | namespace { |
468 | ::std::vector<at::Tensor> wrapper_QuantizedCPU_tensors_dequantize(at::TensorList tensors) { |
469 | // No device check |
470 | // DeviceGuard omitted |
471 | return at::native::dequantize_tensors_quantized_cpu(tensors); |
472 | } |
473 | } // anonymous namespace |
474 | namespace { |
475 | double wrapper_QuantizedCPU__q_scale(const at::Tensor & self) { |
476 | // No device check |
477 | // DeviceGuard omitted |
478 | return at::native::q_scale_quant(self); |
479 | } |
480 | } // anonymous namespace |
481 | namespace { |
482 | int64_t wrapper_QuantizedCPU__q_zero_point(const at::Tensor & self) { |
483 | // No device check |
484 | // DeviceGuard omitted |
485 | return at::native::q_zero_point_quant(self); |
486 | } |
487 | } // anonymous namespace |
488 | namespace { |
489 | at::Tensor wrapper_QuantizedCPU__q_per_channel_scales(const at::Tensor & self) { |
490 | // No device check |
491 | // DeviceGuard omitted |
492 | return at::native::q_per_channel_scales(self); |
493 | } |
494 | } // anonymous namespace |
495 | namespace { |
496 | at::Tensor wrapper_QuantizedCPU__q_per_channel_zero_points(const at::Tensor & self) { |
497 | // No device check |
498 | // DeviceGuard omitted |
499 | return at::native::q_per_channel_zero_points(self); |
500 | } |
501 | } // anonymous namespace |
502 | namespace { |
503 | int64_t wrapper_QuantizedCPU__q_per_channel_axis(const at::Tensor & self) { |
504 | // No device check |
505 | // DeviceGuard omitted |
506 | return at::native::q_per_channel_axis(self); |
507 | } |
508 | } // anonymous namespace |
509 | namespace { |
510 | at::Tensor wrapper_QuantizedCPU__int_repr(const at::Tensor & self) { |
511 | // No device check |
512 | // DeviceGuard omitted |
513 | return at::native::int_repr_quantized_cpu(self); |
514 | } |
515 | } // anonymous namespace |
516 | namespace { |
517 | at::QScheme wrapper_QuantizedCPU__qscheme(const at::Tensor & self) { |
518 | // No device check |
519 | // DeviceGuard omitted |
520 | return at::native::qscheme_quant(self); |
521 | } |
522 | } // anonymous namespace |
523 | namespace { |
524 | at::Tensor & wrapper_QuantizedCPU_source_Storage_storage_offset_set_(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { |
525 | // No device check |
526 | // DeviceGuard omitted |
527 | return at::native::set_storage_quantized_(self, source, storage_offset.expect_int(), C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride)); |
528 | } |
529 | } // anonymous namespace |
530 | namespace { |
531 | at::Tensor & wrapper_QuantizedCPU_Scalar_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { |
532 | // No device check |
533 | // DeviceGuard omitted |
534 | return at::native::masked_fill__quantized_cpu(self, mask, value); |
535 | } |
536 | } // anonymous namespace |
537 | namespace { |
538 | at::Tensor & wrapper_QuantizedCPU_Tensor_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { |
539 | // No device check |
540 | // DeviceGuard omitted |
541 | return at::native::masked_fill__quantized_cpu(self, mask, value); |
542 | } |
543 | } // anonymous namespace |
544 | namespace { |
545 | at::Tensor wrapper_QuantizedCPU__view(const at::Tensor & self, c10::SymIntArrayRef size) { |
546 | // No device check |
547 | // DeviceGuard omitted |
548 | return at::native::view(self, C10_AS_INTARRAYREF_SLOW(size)); |
549 | } |
550 | } // anonymous namespace |
551 | namespace { |
552 | at::Tensor wrapper_QuantizedCPU_Scalar_eq(const at::Tensor & self, const at::Scalar & other) { |
553 | // No device check |
554 | // DeviceGuard omitted |
555 | return at::native::eq_quantized_cpu(self, other); |
556 | } |
557 | } // anonymous namespace |
558 | namespace { |
559 | at::Tensor & wrapper_QuantizedCPU_Scalar_out_eq_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
560 | // No device check |
561 | // DeviceGuard omitted |
562 | return at::native::eq_out_quantized_cpu(self, other, out); |
563 | } |
564 | } // anonymous namespace |
565 | namespace { |
566 | at::Tensor wrapper_QuantizedCPU_Tensor_eq(const at::Tensor & self, const at::Tensor & other) { |
567 | // No device check |
568 | // DeviceGuard omitted |
569 | return at::native::eq_quantized_cpu(self, other); |
570 | } |
571 | } // anonymous namespace |
572 | namespace { |
573 | at::Tensor & wrapper_QuantizedCPU_Tensor_out_eq_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
574 | // No device check |
575 | // DeviceGuard omitted |
576 | return at::native::eq_out_quantized_cpu(self, other, out); |
577 | } |
578 | } // anonymous namespace |
579 | namespace { |
580 | at::Tensor wrapper_QuantizedCPU_Scalar_ne(const at::Tensor & self, const at::Scalar & other) { |
581 | // No device check |
582 | // DeviceGuard omitted |
583 | return at::native::ne_quantized_cpu(self, other); |
584 | } |
585 | } // anonymous namespace |
586 | namespace { |
587 | at::Tensor & wrapper_QuantizedCPU_Scalar_out_ne_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
588 | // No device check |
589 | // DeviceGuard omitted |
590 | return at::native::ne_out_quantized_cpu(self, other, out); |
591 | } |
592 | } // anonymous namespace |
593 | namespace { |
594 | at::Tensor wrapper_QuantizedCPU_Tensor_ne(const at::Tensor & self, const at::Tensor & other) { |
595 | // No device check |
596 | // DeviceGuard omitted |
597 | return at::native::ne_quantized_cpu(self, other); |
598 | } |
599 | } // anonymous namespace |
600 | namespace { |
601 | at::Tensor & wrapper_QuantizedCPU_Tensor_out_ne_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
602 | // No device check |
603 | // DeviceGuard omitted |
604 | return at::native::ne_out_quantized_cpu(self, other, out); |
605 | } |
606 | } // anonymous namespace |
607 | namespace { |
608 | at::Tensor wrapper_QuantizedCPU_Scalar_ge(const at::Tensor & self, const at::Scalar & other) { |
609 | // No device check |
610 | // DeviceGuard omitted |
611 | return at::native::ge_quantized_cpu(self, other); |
612 | } |
613 | } // anonymous namespace |
614 | namespace { |
615 | at::Tensor & wrapper_QuantizedCPU_Scalar_out_ge_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
616 | // No device check |
617 | // DeviceGuard omitted |
618 | return at::native::ge_out_quantized_cpu(self, other, out); |
619 | } |
620 | } // anonymous namespace |
621 | namespace { |
622 | at::Tensor wrapper_QuantizedCPU_Tensor_ge(const at::Tensor & self, const at::Tensor & other) { |
623 | // No device check |
624 | // DeviceGuard omitted |
625 | return at::native::ge_quantized_cpu(self, other); |
626 | } |
627 | } // anonymous namespace |
628 | namespace { |
629 | at::Tensor & wrapper_QuantizedCPU_Tensor_out_ge_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
630 | // No device check |
631 | // DeviceGuard omitted |
632 | return at::native::ge_out_quantized_cpu(self, other, out); |
633 | } |
634 | } // anonymous namespace |
635 | namespace { |
636 | at::Tensor wrapper_QuantizedCPU_Scalar_le(const at::Tensor & self, const at::Scalar & other) { |
637 | // No device check |
638 | // DeviceGuard omitted |
639 | return at::native::le_quantized_cpu(self, other); |
640 | } |
641 | } // anonymous namespace |
642 | namespace { |
643 | at::Tensor & wrapper_QuantizedCPU_Scalar_out_le_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
644 | // No device check |
645 | // DeviceGuard omitted |
646 | return at::native::le_out_quantized_cpu(self, other, out); |
647 | } |
648 | } // anonymous namespace |
649 | namespace { |
650 | at::Tensor wrapper_QuantizedCPU_Tensor_le(const at::Tensor & self, const at::Tensor & other) { |
651 | // No device check |
652 | // DeviceGuard omitted |
653 | return at::native::le_quantized_cpu(self, other); |
654 | } |
655 | } // anonymous namespace |
656 | namespace { |
657 | at::Tensor & wrapper_QuantizedCPU_Tensor_out_le_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
658 | // No device check |
659 | // DeviceGuard omitted |
660 | return at::native::le_out_quantized_cpu(self, other, out); |
661 | } |
662 | } // anonymous namespace |
663 | namespace { |
664 | at::Tensor wrapper_QuantizedCPU_Scalar_gt(const at::Tensor & self, const at::Scalar & other) { |
665 | // No device check |
666 | // DeviceGuard omitted |
667 | return at::native::gt_quantized_cpu(self, other); |
668 | } |
669 | } // anonymous namespace |
670 | namespace { |
671 | at::Tensor & wrapper_QuantizedCPU_Scalar_out_gt_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
672 | // No device check |
673 | // DeviceGuard omitted |
674 | return at::native::gt_out_quantized_cpu(self, other, out); |
675 | } |
676 | } // anonymous namespace |
677 | namespace { |
678 | at::Tensor wrapper_QuantizedCPU_Tensor_gt(const at::Tensor & self, const at::Tensor & other) { |
679 | // No device check |
680 | // DeviceGuard omitted |
681 | return at::native::gt_quantized_cpu(self, other); |
682 | } |
683 | } // anonymous namespace |
684 | namespace { |
685 | at::Tensor & wrapper_QuantizedCPU_Tensor_out_gt_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
686 | // No device check |
687 | // DeviceGuard omitted |
688 | return at::native::gt_out_quantized_cpu(self, other, out); |
689 | } |
690 | } // anonymous namespace |
691 | namespace { |
692 | at::Tensor wrapper_QuantizedCPU_Scalar_lt(const at::Tensor & self, const at::Scalar & other) { |
693 | // No device check |
694 | // DeviceGuard omitted |
695 | return at::native::lt_quantized_cpu(self, other); |
696 | } |
697 | } // anonymous namespace |
698 | namespace { |
699 | at::Tensor & wrapper_QuantizedCPU_Scalar_out_lt_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
700 | // No device check |
701 | // DeviceGuard omitted |
702 | return at::native::lt_out_quantized_cpu(self, other, out); |
703 | } |
704 | } // anonymous namespace |
705 | namespace { |
706 | at::Tensor wrapper_QuantizedCPU_Tensor_lt(const at::Tensor & self, const at::Tensor & other) { |
707 | // No device check |
708 | // DeviceGuard omitted |
709 | return at::native::lt_quantized_cpu(self, other); |
710 | } |
711 | } // anonymous namespace |
712 | namespace { |
713 | at::Tensor & wrapper_QuantizedCPU_Tensor_out_lt_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
714 | // No device check |
715 | // DeviceGuard omitted |
716 | return at::native::lt_out_quantized_cpu(self, other, out); |
717 | } |
718 | } // anonymous namespace |
719 | namespace { |
720 | at::Tensor wrapper_QuantizedCPU__index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) { |
721 | // No device check |
722 | // DeviceGuard omitted |
723 | return at::native::index_select_quantized_cpu_(self, dim, index); |
724 | } |
725 | } // anonymous namespace |
726 | namespace { |
727 | at::Tensor & wrapper_QuantizedCPU_out_index_select_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) { |
728 | // No device check |
729 | // DeviceGuard omitted |
730 | return at::native::index_select_out_cpu_(self, dim, index, out); |
731 | } |
732 | } // anonymous namespace |
733 | namespace { |
734 | at::Tensor wrapper_QuantizedCPU__min(const at::Tensor & self) { |
735 | // No device check |
736 | // DeviceGuard omitted |
737 | return at::native::min_quantized_cpu(self); |
738 | } |
739 | } // anonymous namespace |
740 | namespace { |
741 | at::Tensor wrapper_QuantizedCPU__max(const at::Tensor & self) { |
742 | // No device check |
743 | // DeviceGuard omitted |
744 | return at::native::max_quantized_cpu(self); |
745 | } |
746 | } // anonymous namespace |
747 | namespace { |
748 | at::Tensor & wrapper_QuantizedCPU_unary_out_max_out(const at::Tensor & self, at::Tensor & out) { |
749 | // No device check |
750 | // DeviceGuard omitted |
751 | return at::native::max_quantized_unary_out(self, out); |
752 | } |
753 | } // anonymous namespace |
754 | namespace { |
755 | ::std::tuple<at::Tensor,at::Tensor> wrapper_QuantizedCPU_stable_sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) { |
756 | // No device check |
757 | // DeviceGuard omitted |
758 | return at::native::sort_quantized_cpu_stable(self, stable, dim, descending); |
759 | } |
760 | } // anonymous namespace |
761 | namespace { |
762 | ::std::tuple<at::Tensor,at::Tensor> wrapper_QuantizedCPU__topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) { |
763 | // No device check |
764 | // DeviceGuard omitted |
765 | return at::native::topk_quantized_cpu(self, k, dim, largest, sorted); |
766 | } |
767 | } // anonymous namespace |
768 | namespace { |
769 | at::Tensor wrapper_QuantizedCPU__unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { |
770 | // No device check |
771 | // DeviceGuard omitted |
772 | return at::native::unfold(self, dimension, size, step); |
773 | } |
774 | } // anonymous namespace |
775 | namespace { |
776 | bool wrapper_QuantizedCPU__equal(const at::Tensor & self, const at::Tensor & other) { |
777 | // No device check |
778 | // DeviceGuard omitted |
779 | return at::native::equal_quantized_cpu(self, other); |
780 | } |
781 | } // anonymous namespace |
782 | namespace { |
783 | at::Tensor wrapper_QuantizedCPU__hardsigmoid(const at::Tensor & self) { |
784 | // No device check |
785 | // DeviceGuard omitted |
786 | return at::native::hardsigmoid_quantized_cpu(self); |
787 | } |
788 | } // anonymous namespace |
789 | namespace { |
790 | at::Tensor & wrapper_QuantizedCPU_out_hardsigmoid_out(const at::Tensor & self, at::Tensor & out) { |
791 | // No device check |
792 | // DeviceGuard omitted |
793 | return at::native::hardsigmoid_out_quantized_cpu(self, out); |
794 | } |
795 | } // anonymous namespace |
796 | namespace { |
797 | at::Tensor wrapper_QuantizedCPU__hardtanh(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { |
798 | // No device check |
799 | // DeviceGuard omitted |
800 | return at::native::hardtanh_quantized_cpu(self, min_val, max_val); |
801 | } |
802 | } // anonymous namespace |
803 | namespace { |
804 | at::Tensor & wrapper_QuantizedCPU_out_hardtanh_out(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { |
805 | // No device check |
806 | // DeviceGuard omitted |
807 | return at::native::hardtanh_out_quantized_cpu(self, min_val, max_val, out); |
808 | } |
809 | } // anonymous namespace |
810 | namespace { |
811 | at::Tensor & wrapper_QuantizedCPU__hardtanh_(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { |
812 | // No device check |
813 | // DeviceGuard omitted |
814 | return at::native::hardtanh_quantized_cpu_(self, min_val, max_val); |
815 | } |
816 | } // anonymous namespace |
817 | namespace { |
818 | at::Tensor wrapper_QuantizedCPU__leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) { |
819 | // No device check |
820 | // DeviceGuard omitted |
821 | return at::native::leaky_relu_quantized_cpu(self, negative_slope); |
822 | } |
823 | } // anonymous namespace |
824 | namespace { |
825 | at::Tensor & wrapper_QuantizedCPU_out_leaky_relu_out(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) { |
826 | // No device check |
827 | // DeviceGuard omitted |
828 | return at::native::leaky_relu_out_quantized_cpu(self, negative_slope, out); |
829 | } |
830 | } // anonymous namespace |
831 | namespace { |
832 | at::Tensor & wrapper_QuantizedCPU__leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) { |
833 | // No device check |
834 | // DeviceGuard omitted |
835 | return at::native::leaky_relu_quantized_cpu_(self, negative_slope); |
836 | } |
837 | } // anonymous namespace |
838 | namespace { |
839 | at::Tensor wrapper_QuantizedCPU___adaptive_avg_pool2d(const at::Tensor & self, c10::SymIntArrayRef output_size) { |
840 | // No device check |
841 | // DeviceGuard omitted |
842 | return at::native::adaptive_avg_pool2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size)); |
843 | } |
844 | } // anonymous namespace |
845 | namespace { |
846 | at::Tensor & wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { |
847 | // No device check |
848 | // DeviceGuard omitted |
849 | return at::native::adaptive_avg_pool3d_out_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), out); |
850 | } |
851 | } // anonymous namespace |
852 | namespace { |
853 | at::Tensor wrapper_QuantizedCPU___adaptive_avg_pool3d(const at::Tensor & self, c10::SymIntArrayRef output_size) { |
854 | // No device check |
855 | // DeviceGuard omitted |
856 | return at::native::adaptive_avg_pool3d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size)); |
857 | } |
858 | } // anonymous namespace |
859 | namespace { |
860 | at::Tensor wrapper_QuantizedCPU__avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { |
861 | // No device check |
862 | // DeviceGuard omitted |
863 | return at::native::avg_pool2d_quantized_cpu(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); |
864 | } |
865 | } // anonymous namespace |
866 | namespace { |
867 | at::Tensor wrapper_QuantizedCPU__avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { |
868 | // No device check |
869 | // DeviceGuard omitted |
870 | return at::native::avg_pool3d_quantized_cpu(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); |
871 | } |
872 | } // anonymous namespace |
873 | namespace { |
874 | at::Tensor & wrapper_QuantizedCPU_out_reflection_pad1d_out(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
875 | // No device check |
876 | // DeviceGuard omitted |
877 | return at::native::reflection_pad1d_out_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(padding), out); |
878 | } |
879 | } // anonymous namespace |
880 | namespace { |
881 | at::Tensor wrapper_QuantizedCPU__reflection_pad2d(const at::Tensor & self, c10::SymIntArrayRef padding) { |
882 | // No device check |
883 | // DeviceGuard omitted |
884 | return at::native::reflection_pad2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(padding)); |
885 | } |
886 | } // anonymous namespace |
887 | namespace { |
888 | at::Tensor & wrapper_QuantizedCPU_out_reflection_pad2d_out(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
889 | // No device check |
890 | // DeviceGuard omitted |
891 | return at::native::reflection_pad2d_out_cpu(self, C10_AS_INTARRAYREF_SLOW(padding), out); |
892 | } |
893 | } // anonymous namespace |
894 | namespace { |
895 | at::Tensor wrapper_QuantizedCPU__upsample_bilinear2d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
896 | // No device check |
897 | // DeviceGuard omitted |
898 | return at::native::upsample_bilinear2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w); |
899 | } |
900 | } // anonymous namespace |
901 | namespace { |
902 | at::Tensor wrapper_QuantizedCPU__upsample_nearest2d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
903 | // No device check |
904 | // DeviceGuard omitted |
905 | return at::native::upsample_nearest2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w); |
906 | } |
907 | } // anonymous namespace |
908 | namespace { |
909 | at::Tensor wrapper_QuantizedCPU___upsample_nearest_exact2d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
910 | // No device check |
911 | // DeviceGuard omitted |
912 | return at::native::_upsample_nearest_exact2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w); |
913 | } |
914 | } // anonymous namespace |
915 | namespace { |
916 | at::Tensor wrapper_QuantizedCPU__upsample_nearest3d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
917 | // No device check |
918 | // DeviceGuard omitted |
919 | return at::native::upsample_nearest3d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w); |
920 | } |
921 | } // anonymous namespace |
922 | namespace { |
923 | at::Tensor wrapper_QuantizedCPU___upsample_nearest_exact3d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
924 | // No device check |
925 | // DeviceGuard omitted |
926 | return at::native::_upsample_nearest_exact3d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w); |
927 | } |
928 | } // anonymous namespace |
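// Register all of the wrappers above with the dispatcher under the
// QuantizedCPU dispatch key. Once this static initializer has run, a call
// such as at::relu(qtensor) on a quantized CPU tensor is routed to
// wrapper_QuantizedCPU__relu and from there to at::native::relu_quantized_cpu.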
929 | TORCH_LIBRARY_IMPL(aten, QuantizedCPU, m) { |
930 | m.impl("as_strided" , |
931 | TORCH_FN(wrapper_QuantizedCPU__as_strided)); |
932 | m.impl("quantized_batch_norm" , |
933 | TORCH_FN(wrapper_QuantizedCPU__quantized_batch_norm)); |
934 | m.impl("cat" , |
935 | TORCH_FN(wrapper_QuantizedCPU__cat)); |
936 | m.impl("cat.out" , |
937 | TORCH_FN(wrapper_QuantizedCPU_out_cat_out)); |
938 | m.impl("clamp" , |
939 | TORCH_FN(wrapper_QuantizedCPU__clamp)); |
940 | m.impl("empty.memory_format" , |
941 | TORCH_FN(wrapper_QuantizedCPU_memory_format_empty)); |
942 | m.impl("_empty_affine_quantized" , |
943 | TORCH_FN(wrapper_QuantizedCPU___empty_affine_quantized)); |
944 | m.impl("_empty_per_channel_affine_quantized" , |
945 | TORCH_FN(wrapper_QuantizedCPU___empty_per_channel_affine_quantized)); |
946 | m.impl("resize_" , |
947 | TORCH_FN(wrapper_QuantizedCPU__resize_)); |
948 | m.impl("empty_quantized" , |
949 | TORCH_FN(wrapper_QuantizedCPU__empty_quantized)); |
950 | m.impl("empty_like" , |
951 | TORCH_FN(wrapper_QuantizedCPU__empty_like)); |
952 | m.impl("empty_strided" , |
953 | TORCH_FN(wrapper_QuantizedCPU__empty_strided)); |
954 | m.impl("fill_.Scalar" , |
955 | TORCH_FN(wrapper_QuantizedCPU_Scalar_fill_)); |
956 | m.impl("fill_.Tensor" , |
957 | TORCH_FN(wrapper_QuantizedCPU_Tensor_fill_)); |
958 | m.impl("grid_sampler_2d" , |
959 | TORCH_FN(wrapper_QuantizedCPU__grid_sampler_2d)); |
960 | m.impl("index.Tensor" , |
961 | TORCH_FN(wrapper_QuantizedCPU_Tensor_index)); |
962 | m.impl("_index_put_impl_" , |
963 | TORCH_FN(wrapper_QuantizedCPU___index_put_impl_)); |
964 | m.impl("max.dim" , |
965 | TORCH_FN(wrapper_QuantizedCPU_dim_max)); |
966 | m.impl("quantized_max_pool1d" , |
967 | TORCH_FN(wrapper_QuantizedCPU__quantized_max_pool1d)); |
968 | m.impl("quantized_max_pool2d" , |
969 | TORCH_FN(wrapper_QuantizedCPU__quantized_max_pool2d)); |
970 | m.impl("mean.dim" , |
971 | TORCH_FN(wrapper_QuantizedCPU_dim_mean)); |
972 | m.impl("mean.out" , |
973 | TORCH_FN(wrapper_QuantizedCPU_out_mean_out)); |
974 | m.impl("min.dim" , |
975 | TORCH_FN(wrapper_QuantizedCPU_dim_min)); |
976 | m.impl("channel_shuffle" , |
977 | TORCH_FN(wrapper_QuantizedCPU__channel_shuffle)); |
978 | m.impl("_reshape_alias" , |
979 | TORCH_FN(wrapper_QuantizedCPU___reshape_alias)); |
980 | m.impl("relu" , |
981 | TORCH_FN(wrapper_QuantizedCPU__relu)); |
982 | m.impl("relu_" , |
983 | TORCH_FN(wrapper_QuantizedCPU__relu_)); |
984 | m.impl("_prelu_kernel" , |
985 | TORCH_FN(wrapper_QuantizedCPU___prelu_kernel)); |
986 | m.impl("gelu" , |
987 | TORCH_FN(wrapper_QuantizedCPU__gelu)); |
988 | m.impl("sigmoid" , |
989 | TORCH_FN(wrapper_QuantizedCPU__sigmoid)); |
990 | m.impl("squeeze" , |
991 | TORCH_FN(wrapper_QuantizedCPU__squeeze)); |
992 | m.impl("squeeze.dim" , |
993 | TORCH_FN(wrapper_QuantizedCPU_dim_squeeze)); |
994 | m.impl("squeeze.dims" , |
995 | TORCH_FN(wrapper_QuantizedCPU_dims_squeeze)); |
996 | m.impl("std.correction" , |
997 | TORCH_FN(wrapper_QuantizedCPU_correction_std)); |
998 | m.impl("std.correction_out" , |
999 | TORCH_FN(wrapper_QuantizedCPU_correction_out_std_out)); |
1000 | m.impl("tanh" , |
1001 | TORCH_FN(wrapper_QuantizedCPU__tanh)); |
1002 | m.impl("threshold" , |
1003 | TORCH_FN(wrapper_QuantizedCPU__threshold)); |
1004 | m.impl("flip" , |
1005 | TORCH_FN(wrapper_QuantizedCPU__flip)); |
1006 | m.impl("unsqueeze" , |
1007 | TORCH_FN(wrapper_QuantizedCPU__unsqueeze)); |
1008 | m.impl("clone" , |
1009 | TORCH_FN(wrapper_QuantizedCPU__clone)); |
1010 | m.impl("dequantize.self" , |
1011 | TORCH_FN(wrapper_QuantizedCPU_self_dequantize)); |
1012 | m.impl("dequantize.tensors" , |
1013 | TORCH_FN(wrapper_QuantizedCPU_tensors_dequantize)); |
1014 | m.impl("q_scale" , |
1015 | TORCH_FN(wrapper_QuantizedCPU__q_scale)); |
1016 | m.impl("q_zero_point" , |
1017 | TORCH_FN(wrapper_QuantizedCPU__q_zero_point)); |
1018 | m.impl("q_per_channel_scales" , |
1019 | TORCH_FN(wrapper_QuantizedCPU__q_per_channel_scales)); |
1020 | m.impl("q_per_channel_zero_points" , |
1021 | TORCH_FN(wrapper_QuantizedCPU__q_per_channel_zero_points)); |
1022 | m.impl("q_per_channel_axis" , |
1023 | TORCH_FN(wrapper_QuantizedCPU__q_per_channel_axis)); |
1024 | m.impl("int_repr" , |
1025 | TORCH_FN(wrapper_QuantizedCPU__int_repr)); |
1026 | m.impl("qscheme" , |
1027 | TORCH_FN(wrapper_QuantizedCPU__qscheme)); |
1028 | m.impl("set_.source_Storage_storage_offset" , |
1029 | TORCH_FN(wrapper_QuantizedCPU_source_Storage_storage_offset_set_)); |
1030 | m.impl("masked_fill_.Scalar" , |
1031 | TORCH_FN(wrapper_QuantizedCPU_Scalar_masked_fill_)); |
1032 | m.impl("masked_fill_.Tensor" , |
1033 | TORCH_FN(wrapper_QuantizedCPU_Tensor_masked_fill_)); |
1034 | m.impl("view" , |
1035 | TORCH_FN(wrapper_QuantizedCPU__view)); |
1036 | m.impl("eq.Scalar" , |
1037 | TORCH_FN(wrapper_QuantizedCPU_Scalar_eq)); |
1038 | m.impl("eq.Scalar_out" , |
1039 | TORCH_FN(wrapper_QuantizedCPU_Scalar_out_eq_out)); |
1040 | m.impl("eq.Tensor" , |
1041 | TORCH_FN(wrapper_QuantizedCPU_Tensor_eq)); |
1042 | m.impl("eq.Tensor_out" , |
1043 | TORCH_FN(wrapper_QuantizedCPU_Tensor_out_eq_out)); |
1044 | m.impl("ne.Scalar" , |
1045 | TORCH_FN(wrapper_QuantizedCPU_Scalar_ne)); |
1046 | m.impl("ne.Scalar_out" , |
1047 | TORCH_FN(wrapper_QuantizedCPU_Scalar_out_ne_out)); |
1048 | m.impl("ne.Tensor" , |
1049 | TORCH_FN(wrapper_QuantizedCPU_Tensor_ne)); |
1050 | m.impl("ne.Tensor_out" , |
1051 | TORCH_FN(wrapper_QuantizedCPU_Tensor_out_ne_out)); |
1052 | m.impl("ge.Scalar" , |
1053 | TORCH_FN(wrapper_QuantizedCPU_Scalar_ge)); |
1054 | m.impl("ge.Scalar_out" , |
1055 | TORCH_FN(wrapper_QuantizedCPU_Scalar_out_ge_out)); |
1056 | m.impl("ge.Tensor" , |
1057 | TORCH_FN(wrapper_QuantizedCPU_Tensor_ge)); |
1058 | m.impl("ge.Tensor_out" , |
1059 | TORCH_FN(wrapper_QuantizedCPU_Tensor_out_ge_out)); |
1060 | m.impl("le.Scalar" , |
1061 | TORCH_FN(wrapper_QuantizedCPU_Scalar_le)); |
1062 | m.impl("le.Scalar_out" , |
1063 | TORCH_FN(wrapper_QuantizedCPU_Scalar_out_le_out)); |
1064 | m.impl("le.Tensor" , |
1065 | TORCH_FN(wrapper_QuantizedCPU_Tensor_le)); |
1066 | m.impl("le.Tensor_out" , |
1067 | TORCH_FN(wrapper_QuantizedCPU_Tensor_out_le_out)); |
1068 | m.impl("gt.Scalar" , |
1069 | TORCH_FN(wrapper_QuantizedCPU_Scalar_gt)); |
1070 | m.impl("gt.Scalar_out" , |
1071 | TORCH_FN(wrapper_QuantizedCPU_Scalar_out_gt_out)); |
1072 | m.impl("gt.Tensor" , |
1073 | TORCH_FN(wrapper_QuantizedCPU_Tensor_gt)); |
1074 | m.impl("gt.Tensor_out" , |
1075 | TORCH_FN(wrapper_QuantizedCPU_Tensor_out_gt_out)); |
1076 | m.impl("lt.Scalar" , |
1077 | TORCH_FN(wrapper_QuantizedCPU_Scalar_lt)); |
1078 | m.impl("lt.Scalar_out" , |
1079 | TORCH_FN(wrapper_QuantizedCPU_Scalar_out_lt_out)); |
1080 | m.impl("lt.Tensor" , |
1081 | TORCH_FN(wrapper_QuantizedCPU_Tensor_lt)); |
1082 | m.impl("lt.Tensor_out" , |
1083 | TORCH_FN(wrapper_QuantizedCPU_Tensor_out_lt_out)); |
1084 | m.impl("index_select" , |
1085 | TORCH_FN(wrapper_QuantizedCPU__index_select)); |
1086 | m.impl("index_select.out" , |
1087 | TORCH_FN(wrapper_QuantizedCPU_out_index_select_out)); |
1088 | m.impl("min" , |
1089 | TORCH_FN(wrapper_QuantizedCPU__min)); |
1090 | m.impl("max" , |
1091 | TORCH_FN(wrapper_QuantizedCPU__max)); |
1092 | m.impl("max.unary_out" , |
1093 | TORCH_FN(wrapper_QuantizedCPU_unary_out_max_out)); |
1094 | m.impl("sort.stable" , |
1095 | TORCH_FN(wrapper_QuantizedCPU_stable_sort)); |
1096 | m.impl("topk" , |
1097 | TORCH_FN(wrapper_QuantizedCPU__topk)); |
1098 | m.impl("unfold" , |
1099 | TORCH_FN(wrapper_QuantizedCPU__unfold)); |
1100 | m.impl("equal" , |
1101 | TORCH_FN(wrapper_QuantizedCPU__equal)); |
1102 | m.impl("hardsigmoid" , |
1103 | TORCH_FN(wrapper_QuantizedCPU__hardsigmoid)); |
1104 | m.impl("hardsigmoid.out" , |
1105 | TORCH_FN(wrapper_QuantizedCPU_out_hardsigmoid_out)); |
1106 | m.impl("hardtanh" , |
1107 | TORCH_FN(wrapper_QuantizedCPU__hardtanh)); |
1108 | m.impl("hardtanh.out" , |
1109 | TORCH_FN(wrapper_QuantizedCPU_out_hardtanh_out)); |
1110 | m.impl("hardtanh_" , |
1111 | TORCH_FN(wrapper_QuantizedCPU__hardtanh_)); |
1112 | m.impl("leaky_relu" , |
1113 | TORCH_FN(wrapper_QuantizedCPU__leaky_relu)); |
1114 | m.impl("leaky_relu.out" , |
1115 | TORCH_FN(wrapper_QuantizedCPU_out_leaky_relu_out)); |
1116 | m.impl("leaky_relu_" , |
1117 | TORCH_FN(wrapper_QuantizedCPU__leaky_relu_)); |
1118 | m.impl("_adaptive_avg_pool2d" , |
1119 | TORCH_FN(wrapper_QuantizedCPU___adaptive_avg_pool2d)); |
1120 | m.impl("adaptive_avg_pool3d.out" , |
1121 | TORCH_FN(wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out)); |
1122 | m.impl("_adaptive_avg_pool3d" , |
1123 | TORCH_FN(wrapper_QuantizedCPU___adaptive_avg_pool3d)); |
1124 | m.impl("avg_pool2d" , |
1125 | TORCH_FN(wrapper_QuantizedCPU__avg_pool2d)); |
1126 | m.impl("avg_pool3d" , |
1127 | TORCH_FN(wrapper_QuantizedCPU__avg_pool3d)); |
1128 | m.impl("reflection_pad1d.out" , |
1129 | TORCH_FN(wrapper_QuantizedCPU_out_reflection_pad1d_out)); |
1130 | m.impl("reflection_pad2d" , |
1131 | TORCH_FN(wrapper_QuantizedCPU__reflection_pad2d)); |
1132 | m.impl("reflection_pad2d.out" , |
1133 | TORCH_FN(wrapper_QuantizedCPU_out_reflection_pad2d_out)); |
1134 | m.impl("upsample_bilinear2d" , |
1135 | TORCH_FN(wrapper_QuantizedCPU__upsample_bilinear2d)); |
1136 | m.impl("upsample_nearest2d" , |
1137 | TORCH_FN(wrapper_QuantizedCPU__upsample_nearest2d)); |
1138 | m.impl("_upsample_nearest_exact2d" , |
1139 | TORCH_FN(wrapper_QuantizedCPU___upsample_nearest_exact2d)); |
1140 | m.impl("upsample_nearest3d" , |
1141 | TORCH_FN(wrapper_QuantizedCPU__upsample_nearest3d)); |
1142 | m.impl("_upsample_nearest_exact3d" , |
1143 | TORCH_FN(wrapper_QuantizedCPU___upsample_nearest_exact3d)); |
1144 | }; |
1145 | } // anonymous namespace |
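// Definitions for the at::quantizedcpu:: namespace: these call the wrappers
// above directly, bypassing the dispatcher, and provide the definitions behind
// the at::quantizedcpu:: declarations in the generated per-dispatch-key headers.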
1146 | namespace quantizedcpu { |
1147 | at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) { |
1148 | return wrapper_QuantizedCPU__as_strided(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); |
1149 | } |
1150 | at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) { |
1151 | return wrapper_QuantizedCPU__as_strided(self, size, stride, storage_offset); |
1152 | } |
1153 | at::Tensor quantized_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) { |
1154 | return wrapper_QuantizedCPU__quantized_batch_norm(input, weight, bias, mean, var, eps, output_scale, output_zero_point); |
1155 | } |
1156 | at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim) { |
1157 | return wrapper_QuantizedCPU__cat(tensors, dim); |
1158 | } |
1159 | at::Tensor & cat_out(at::Tensor & out, const at::ITensorListRef & tensors, int64_t dim) { |
1160 | return wrapper_QuantizedCPU_out_cat_out(tensors, dim, out); |
1161 | } |
1162 | at::Tensor & cat_outf(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) { |
1163 | return wrapper_QuantizedCPU_out_cat_out(tensors, dim, out); |
1164 | } |
1165 | at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) { |
1166 | return wrapper_QuantizedCPU__clamp(self, min, max); |
1167 | } |
1168 | at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
1169 | return wrapper_QuantizedCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1170 | } |
1171 | at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
1172 | return wrapper_QuantizedCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format); |
1173 | } |
1174 | at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
1175 | return wrapper_QuantizedCPU_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1176 | } |
1177 | at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
1178 | return wrapper_QuantizedCPU_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format); |
1179 | } |
1180 | at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) { |
1181 | return wrapper_QuantizedCPU___empty_affine_quantized(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1182 | } |
1183 | at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) { |
1184 | return wrapper_QuantizedCPU___empty_affine_quantized(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format); |
1185 | } |
1186 | at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
1187 | return wrapper_QuantizedCPU___empty_per_channel_affine_quantized(size, scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1188 | } |
1189 | at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
1190 | return wrapper_QuantizedCPU___empty_per_channel_affine_quantized(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format); |
1191 | } |
1192 | const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) { |
1193 | return wrapper_QuantizedCPU__resize_(self, c10::fromIntArrayRefSlow(size), memory_format); |
1194 | } |
1195 | const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) { |
1196 | return wrapper_QuantizedCPU__resize_(self, size, memory_format); |
1197 | } |
1198 | at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
1199 | return wrapper_QuantizedCPU__empty_quantized(size, qtensor, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1200 | } |
1201 | at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
1202 | return wrapper_QuantizedCPU__empty_quantized(size, qtensor, dtype, layout, device, pin_memory, memory_format); |
1203 | } |
1204 | at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) { |
1205 | return wrapper_QuantizedCPU__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); |
1206 | } |
1207 | at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { |
1208 | return wrapper_QuantizedCPU__empty_like(self, dtype, layout, device, pin_memory, memory_format); |
1209 | } |
1210 | at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options) { |
1211 | return wrapper_QuantizedCPU__empty_strided(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
1212 | } |
1213 | at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
1214 | return wrapper_QuantizedCPU__empty_strided(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory); |
1215 | } |
1216 | at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) { |
1217 | return wrapper_QuantizedCPU__empty_strided(size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); |
1218 | } |
1219 | at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { |
1220 | return wrapper_QuantizedCPU__empty_strided(size, stride, dtype, layout, device, pin_memory); |
1221 | } |
1222 | at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) { |
1223 | return wrapper_QuantizedCPU_Scalar_fill_(self, value); |
1224 | } |
1225 | at::Tensor & fill_(at::Tensor & self, const at::Tensor & value) { |
1226 | return wrapper_QuantizedCPU_Tensor_fill_(self, value); |
1227 | } |
1228 | at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { |
1229 | return wrapper_QuantizedCPU__grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners); |
1230 | } |
1231 | at::Tensor index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) { |
1232 | return wrapper_QuantizedCPU_Tensor_index(self, indices); |
1233 | } |
1234 | at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) { |
1235 | return wrapper_QuantizedCPU___index_put_impl_(self, indices, values, accumulate, unsafe); |
1236 | } |
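// Reduction overloads that take `dim` return a (values, indices) pair.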
1237 | ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim) { |
1238 | return wrapper_QuantizedCPU_dim_max(self, dim, keepdim); |
1239 | } |
1240 | at::Tensor quantized_max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { |
1241 | return wrapper_QuantizedCPU__quantized_max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode); |
1242 | } |
1243 | at::Tensor quantized_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { |
1244 | return wrapper_QuantizedCPU__quantized_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode); |
1245 | } |
1246 | at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
1247 | return wrapper_QuantizedCPU_dim_mean(self, dim, keepdim, dtype); |
1248 | } |
1249 | at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
1250 | return wrapper_QuantizedCPU_out_mean_out(self, dim, keepdim, dtype, out); |
1251 | } |
1252 | at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
1253 | return wrapper_QuantizedCPU_out_mean_out(self, dim, keepdim, dtype, out); |
1254 | } |
1255 | ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim) { |
1256 | return wrapper_QuantizedCPU_dim_min(self, dim, keepdim); |
1257 | } |
1258 | at::Tensor channel_shuffle(const at::Tensor & self, int64_t groups) { |
1259 | return wrapper_QuantizedCPU__channel_shuffle(self, groups); |
1260 | } |
1261 | at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) { |
1262 | return wrapper_QuantizedCPU___reshape_alias(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); |
1263 | } |
1264 | at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { |
1265 | return wrapper_QuantizedCPU___reshape_alias(self, size, stride); |
1266 | } |
1267 | at::Tensor relu(const at::Tensor & self) { |
1268 | return wrapper_QuantizedCPU__relu(self); |
1269 | } |
1270 | at::Tensor & relu_(at::Tensor & self) { |
1271 | return wrapper_QuantizedCPU__relu_(self); |
1272 | } |
1273 | at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight) { |
1274 | return wrapper_QuantizedCPU___prelu_kernel(self, weight); |
1275 | } |
1276 | at::Tensor gelu(const at::Tensor & self, c10::string_view approximate) { |
1277 | return wrapper_QuantizedCPU__gelu(self, approximate); |
1278 | } |
1279 | at::Tensor sigmoid(const at::Tensor & self) { |
1280 | return wrapper_QuantizedCPU__sigmoid(self); |
1281 | } |
1282 | at::Tensor squeeze(const at::Tensor & self) { |
1283 | return wrapper_QuantizedCPU__squeeze(self); |
1284 | } |
1285 | at::Tensor squeeze(const at::Tensor & self, int64_t dim) { |
1286 | return wrapper_QuantizedCPU_dim_squeeze(self, dim); |
1287 | } |
1288 | at::Tensor squeeze(const at::Tensor & self, at::IntArrayRef dim) { |
1289 | return wrapper_QuantizedCPU_dims_squeeze(self, dim); |
1290 | } |
1291 | at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) { |
1292 | return wrapper_QuantizedCPU_correction_std(self, dim, correction, keepdim); |
1293 | } |
1294 | at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) { |
1295 | return wrapper_QuantizedCPU_correction_out_std_out(self, dim, correction, keepdim, out); |
1296 | } |
1297 | at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) { |
1298 | return wrapper_QuantizedCPU_correction_out_std_out(self, dim, correction, keepdim, out); |
1299 | } |
1300 | at::Tensor tanh(const at::Tensor & self) { |
1301 | return wrapper_QuantizedCPU__tanh(self); |
1302 | } |
1303 | at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { |
1304 | return wrapper_QuantizedCPU__threshold(self, threshold, value); |
1305 | } |
1306 | at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims) { |
1307 | return wrapper_QuantizedCPU__flip(self, dims); |
1308 | } |
1309 | at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) { |
1310 | return wrapper_QuantizedCPU__unsqueeze(self, dim); |
1311 | } |
1312 | at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) { |
1313 | return wrapper_QuantizedCPU__clone(self, memory_format); |
1314 | } |
1315 | at::Tensor dequantize(const at::Tensor & self) { |
1316 | return wrapper_QuantizedCPU_self_dequantize(self); |
1317 | } |
1318 | ::std::vector<at::Tensor> dequantize(at::TensorList tensors) { |
1319 | return wrapper_QuantizedCPU_tensors_dequantize(tensors); |
1320 | } |
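// Quantizer introspection: per-tensor scale and zero point, per-channel scales,
// zero points and axis, the underlying integer representation, and the
// quantization scheme of the tensor.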
1321 | double q_scale(const at::Tensor & self) { |
1322 | return wrapper_QuantizedCPU__q_scale(self); |
1323 | } |
1324 | int64_t q_zero_point(const at::Tensor & self) { |
1325 | return wrapper_QuantizedCPU__q_zero_point(self); |
1326 | } |
1327 | at::Tensor q_per_channel_scales(const at::Tensor & self) { |
1328 | return wrapper_QuantizedCPU__q_per_channel_scales(self); |
1329 | } |
1330 | at::Tensor q_per_channel_zero_points(const at::Tensor & self) { |
1331 | return wrapper_QuantizedCPU__q_per_channel_zero_points(self); |
1332 | } |
1333 | int64_t q_per_channel_axis(const at::Tensor & self) { |
1334 | return wrapper_QuantizedCPU__q_per_channel_axis(self); |
1335 | } |
1336 | at::Tensor int_repr(const at::Tensor & self) { |
1337 | return wrapper_QuantizedCPU__int_repr(self); |
1338 | } |
1339 | at::QScheme qscheme(const at::Tensor & self) { |
1340 | return wrapper_QuantizedCPU__qscheme(self); |
1341 | } |
1342 | at::Tensor & set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) { |
1343 | return wrapper_QuantizedCPU_source_Storage_storage_offset_set_(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); |
1344 | } |
1345 | at::Tensor & set__symint(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { |
1346 | return wrapper_QuantizedCPU_source_Storage_storage_offset_set_(self, source, storage_offset, size, stride); |
1347 | } |
1348 | at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { |
1349 | return wrapper_QuantizedCPU_Scalar_masked_fill_(self, mask, value); |
1350 | } |
1351 | at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { |
1352 | return wrapper_QuantizedCPU_Tensor_masked_fill_(self, mask, value); |
1353 | } |
1354 | at::Tensor view(const at::Tensor & self, at::IntArrayRef size) { |
1355 | return wrapper_QuantizedCPU__view(self, c10::fromIntArrayRefSlow(size)); |
1356 | } |
1357 | at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size) { |
1358 | return wrapper_QuantizedCPU__view(self, size); |
1359 | } |
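// Comparison operators below follow one pattern: a Scalar and a Tensor overload,
// each with functional, *_out, and *_outf forms, all routed to the matching
// QuantizedCPU wrapper.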
1360 | at::Tensor eq(const at::Tensor & self, const at::Scalar & other) { |
1361 | return wrapper_QuantizedCPU_Scalar_eq(self, other); |
1362 | } |
1363 | at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { |
1364 | return wrapper_QuantizedCPU_Scalar_out_eq_out(self, other, out); |
1365 | } |
1366 | at::Tensor & eq_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
1367 | return wrapper_QuantizedCPU_Scalar_out_eq_out(self, other, out); |
1368 | } |
1369 | at::Tensor eq(const at::Tensor & self, const at::Tensor & other) { |
1370 | return wrapper_QuantizedCPU_Tensor_eq(self, other); |
1371 | } |
1372 | at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
1373 | return wrapper_QuantizedCPU_Tensor_out_eq_out(self, other, out); |
1374 | } |
1375 | at::Tensor & eq_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
1376 | return wrapper_QuantizedCPU_Tensor_out_eq_out(self, other, out); |
1377 | } |
1378 | at::Tensor ne(const at::Tensor & self, const at::Scalar & other) { |
1379 | return wrapper_QuantizedCPU_Scalar_ne(self, other); |
1380 | } |
1381 | at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { |
1382 | return wrapper_QuantizedCPU_Scalar_out_ne_out(self, other, out); |
1383 | } |
1384 | at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
1385 | return wrapper_QuantizedCPU_Scalar_out_ne_out(self, other, out); |
1386 | } |
1387 | at::Tensor ne(const at::Tensor & self, const at::Tensor & other) { |
1388 | return wrapper_QuantizedCPU_Tensor_ne(self, other); |
1389 | } |
1390 | at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
1391 | return wrapper_QuantizedCPU_Tensor_out_ne_out(self, other, out); |
1392 | } |
1393 | at::Tensor & ne_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
1394 | return wrapper_QuantizedCPU_Tensor_out_ne_out(self, other, out); |
1395 | } |
1396 | at::Tensor ge(const at::Tensor & self, const at::Scalar & other) { |
1397 | return wrapper_QuantizedCPU_Scalar_ge(self, other); |
1398 | } |
1399 | at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { |
1400 | return wrapper_QuantizedCPU_Scalar_out_ge_out(self, other, out); |
1401 | } |
1402 | at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
1403 | return wrapper_QuantizedCPU_Scalar_out_ge_out(self, other, out); |
1404 | } |
1405 | at::Tensor ge(const at::Tensor & self, const at::Tensor & other) { |
1406 | return wrapper_QuantizedCPU_Tensor_ge(self, other); |
1407 | } |
1408 | at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
1409 | return wrapper_QuantizedCPU_Tensor_out_ge_out(self, other, out); |
1410 | } |
1411 | at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
1412 | return wrapper_QuantizedCPU_Tensor_out_ge_out(self, other, out); |
1413 | } |
1414 | at::Tensor le(const at::Tensor & self, const at::Scalar & other) { |
1415 | return wrapper_QuantizedCPU_Scalar_le(self, other); |
1416 | } |
1417 | at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { |
1418 | return wrapper_QuantizedCPU_Scalar_out_le_out(self, other, out); |
1419 | } |
1420 | at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
1421 | return wrapper_QuantizedCPU_Scalar_out_le_out(self, other, out); |
1422 | } |
1423 | at::Tensor le(const at::Tensor & self, const at::Tensor & other) { |
1424 | return wrapper_QuantizedCPU_Tensor_le(self, other); |
1425 | } |
1426 | at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
1427 | return wrapper_QuantizedCPU_Tensor_out_le_out(self, other, out); |
1428 | } |
1429 | at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
1430 | return wrapper_QuantizedCPU_Tensor_out_le_out(self, other, out); |
1431 | } |
1432 | at::Tensor gt(const at::Tensor & self, const at::Scalar & other) { |
1433 | return wrapper_QuantizedCPU_Scalar_gt(self, other); |
1434 | } |
1435 | at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { |
1436 | return wrapper_QuantizedCPU_Scalar_out_gt_out(self, other, out); |
1437 | } |
1438 | at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
1439 | return wrapper_QuantizedCPU_Scalar_out_gt_out(self, other, out); |
1440 | } |
1441 | at::Tensor gt(const at::Tensor & self, const at::Tensor & other) { |
1442 | return wrapper_QuantizedCPU_Tensor_gt(self, other); |
1443 | } |
1444 | at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
1445 | return wrapper_QuantizedCPU_Tensor_out_gt_out(self, other, out); |
1446 | } |
1447 | at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
1448 | return wrapper_QuantizedCPU_Tensor_out_gt_out(self, other, out); |
1449 | } |
1450 | at::Tensor lt(const at::Tensor & self, const at::Scalar & other) { |
1451 | return wrapper_QuantizedCPU_Scalar_lt(self, other); |
1452 | } |
1453 | at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { |
1454 | return wrapper_QuantizedCPU_Scalar_out_lt_out(self, other, out); |
1455 | } |
1456 | at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
1457 | return wrapper_QuantizedCPU_Scalar_out_lt_out(self, other, out); |
1458 | } |
1459 | at::Tensor lt(const at::Tensor & self, const at::Tensor & other) { |
1460 | return wrapper_QuantizedCPU_Tensor_lt(self, other); |
1461 | } |
1462 | at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { |
1463 | return wrapper_QuantizedCPU_Tensor_out_lt_out(self, other, out); |
1464 | } |
1465 | at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
1466 | return wrapper_QuantizedCPU_Tensor_out_lt_out(self, other, out); |
1467 | } |
1468 | at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) { |
1469 | return wrapper_QuantizedCPU__index_select(self, dim, index); |
1470 | } |
1471 | at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) { |
1472 | return wrapper_QuantizedCPU_out_index_select_out(self, dim, index, out); |
1473 | } |
1474 | at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) { |
1475 | return wrapper_QuantizedCPU_out_index_select_out(self, dim, index, out); |
1476 | } |
1477 | at::Tensor min(const at::Tensor & self) { |
1478 | return wrapper_QuantizedCPU__min(self); |
1479 | } |
1480 | at::Tensor max(const at::Tensor & self) { |
1481 | return wrapper_QuantizedCPU__max(self); |
1482 | } |
1483 | at::Tensor & max_out(at::Tensor & out, const at::Tensor & self) { |
1484 | return wrapper_QuantizedCPU_unary_out_max_out(self, out); |
1485 | } |
1486 | at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) { |
1487 | return wrapper_QuantizedCPU_unary_out_max_out(self, out); |
1488 | } |
1489 | ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) { |
1490 | return wrapper_QuantizedCPU_stable_sort(self, stable, dim, descending); |
1491 | } |
1492 | ::std::tuple<at::Tensor,at::Tensor> topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) { |
1493 | return wrapper_QuantizedCPU__topk(self, k, dim, largest, sorted); |
1494 | } |
1495 | at::Tensor unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { |
1496 | return wrapper_QuantizedCPU__unfold(self, dimension, size, step); |
1497 | } |
1498 | bool equal(const at::Tensor & self, const at::Tensor & other) { |
1499 | return wrapper_QuantizedCPU__equal(self, other); |
1500 | } |
1501 | at::Tensor hardsigmoid(const at::Tensor & self) { |
1502 | return wrapper_QuantizedCPU__hardsigmoid(self); |
1503 | } |
1504 | at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self) { |
1505 | return wrapper_QuantizedCPU_out_hardsigmoid_out(self, out); |
1506 | } |
1507 | at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out) { |
1508 | return wrapper_QuantizedCPU_out_hardsigmoid_out(self, out); |
1509 | } |
1510 | at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { |
1511 | return wrapper_QuantizedCPU__hardtanh(self, min_val, max_val); |
1512 | } |
1513 | at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { |
1514 | return wrapper_QuantizedCPU_out_hardtanh_out(self, min_val, max_val, out); |
1515 | } |
1516 | at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { |
1517 | return wrapper_QuantizedCPU_out_hardtanh_out(self, min_val, max_val, out); |
1518 | } |
1519 | at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { |
1520 | return wrapper_QuantizedCPU__hardtanh_(self, min_val, max_val); |
1521 | } |
1522 | at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) { |
1523 | return wrapper_QuantizedCPU__leaky_relu(self, negative_slope); |
1524 | } |
1525 | at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope) { |
1526 | return wrapper_QuantizedCPU_out_leaky_relu_out(self, negative_slope, out); |
1527 | } |
1528 | at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) { |
1529 | return wrapper_QuantizedCPU_out_leaky_relu_out(self, negative_slope, out); |
1530 | } |
1531 | at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) { |
1532 | return wrapper_QuantizedCPU__leaky_relu_(self, negative_slope); |
1533 | } |
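// The pooling, padding, and upsampling entries below repeat the
// IntArrayRef-to-SymIntArrayRef conversion pattern from the top of this
// namespace for their size, padding, and output_size arguments.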
1534 | at::Tensor _adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) { |
1535 | return wrapper_QuantizedCPU___adaptive_avg_pool2d(self, c10::fromIntArrayRefSlow(output_size)); |
1536 | } |
1537 | at::Tensor _adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) { |
1538 | return wrapper_QuantizedCPU___adaptive_avg_pool2d(self, output_size); |
1539 | } |
1540 | at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { |
1541 | return wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out); |
1542 | } |
1543 | at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { |
1544 | return wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out); |
1545 | } |
1546 | at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) { |
1547 | return wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(self, output_size, out); |
1548 | } |
1549 | at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { |
1550 | return wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(self, output_size, out); |
1551 | } |
1552 | at::Tensor _adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) { |
1553 | return wrapper_QuantizedCPU___adaptive_avg_pool3d(self, c10::fromIntArrayRefSlow(output_size)); |
1554 | } |
1555 | at::Tensor _adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) { |
1556 | return wrapper_QuantizedCPU___adaptive_avg_pool3d(self, output_size); |
1557 | } |
1558 | at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { |
1559 | return wrapper_QuantizedCPU__avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); |
1560 | } |
1561 | at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { |
1562 | return wrapper_QuantizedCPU__avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); |
1563 | } |
1564 | at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { |
1565 | return wrapper_QuantizedCPU_out_reflection_pad1d_out(self, c10::fromIntArrayRefSlow(padding), out); |
1566 | } |
1567 | at::Tensor & reflection_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { |
1568 | return wrapper_QuantizedCPU_out_reflection_pad1d_out(self, c10::fromIntArrayRefSlow(padding), out); |
1569 | } |
1570 | at::Tensor & reflection_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { |
1571 | return wrapper_QuantizedCPU_out_reflection_pad1d_out(self, padding, out); |
1572 | } |
1573 | at::Tensor & reflection_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
1574 | return wrapper_QuantizedCPU_out_reflection_pad1d_out(self, padding, out); |
1575 | } |
1576 | at::Tensor reflection_pad2d(const at::Tensor & self, at::IntArrayRef padding) { |
1577 | return wrapper_QuantizedCPU__reflection_pad2d(self, c10::fromIntArrayRefSlow(padding)); |
1578 | } |
1579 | at::Tensor reflection_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) { |
1580 | return wrapper_QuantizedCPU__reflection_pad2d(self, padding); |
1581 | } |
1582 | at::Tensor & reflection_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { |
1583 | return wrapper_QuantizedCPU_out_reflection_pad2d_out(self, c10::fromIntArrayRefSlow(padding), out); |
1584 | } |
1585 | at::Tensor & reflection_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { |
1586 | return wrapper_QuantizedCPU_out_reflection_pad2d_out(self, c10::fromIntArrayRefSlow(padding), out); |
1587 | } |
1588 | at::Tensor & reflection_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { |
1589 | return wrapper_QuantizedCPU_out_reflection_pad2d_out(self, padding, out); |
1590 | } |
1591 | at::Tensor & reflection_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
1592 | return wrapper_QuantizedCPU_out_reflection_pad2d_out(self, padding, out); |
1593 | } |
1594 | at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1595 | return wrapper_QuantizedCPU__upsample_bilinear2d(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w); |
1596 | } |
1597 | at::Tensor upsample_bilinear2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1598 | return wrapper_QuantizedCPU__upsample_bilinear2d(self, output_size, align_corners, scales_h, scales_w); |
1599 | } |
1600 | at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1601 | return wrapper_QuantizedCPU__upsample_nearest2d(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w); |
1602 | } |
1603 | at::Tensor upsample_nearest2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1604 | return wrapper_QuantizedCPU__upsample_nearest2d(self, output_size, scales_h, scales_w); |
1605 | } |
1606 | at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1607 | return wrapper_QuantizedCPU___upsample_nearest_exact2d(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w); |
1608 | } |
1609 | at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1610 | return wrapper_QuantizedCPU___upsample_nearest_exact2d(self, output_size, scales_h, scales_w); |
1611 | } |
1612 | at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1613 | return wrapper_QuantizedCPU__upsample_nearest3d(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w); |
1614 | } |
1615 | at::Tensor upsample_nearest3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1616 | return wrapper_QuantizedCPU__upsample_nearest3d(self, output_size, scales_d, scales_h, scales_w); |
1617 | } |
1618 | at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1619 | return wrapper_QuantizedCPU___upsample_nearest_exact3d(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w); |
1620 | } |
1621 | at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) { |
1622 | return wrapper_QuantizedCPU___upsample_nearest_exact3d(self, output_size, scales_d, scales_h, scales_w); |
1623 | } |
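// Illustrative only (assumes a QuantizedCPU tensor `q`): calling one of these
// entry points directly,
//   at::Tensor v = at::quantizedcpu::view(q, {1, -1});
// reaches the same kernel as the dispatched call q.view({1, -1}).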
1624 | } // namespace quantizedcpu |
1625 | } // namespace at |
1626 | |