// required for old g++ to compile PRId64 macros, see
// https://github.com/pytorch/pytorch/issues/3571
// for context
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

// An external backend might generate this file within its own code tree
// and run clang-format over all source files in that tree, so disable
// clang-format here since the backend might use a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries; it
// just excludes external projects such as torch_xla which
// reuse some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
    defined(TORCH_HIP_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/Optional.h>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>


#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_prelu_kernel_native.h>
#include <ATen/ops/_reshape_alias_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clone_native.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_quantized_native.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/equal_native.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/grid_sampler_2d_native.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/int_repr_native.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/q_per_channel_axis_native.h>
#include <ATen/ops/q_per_channel_scales_native.h>
#include <ATen/ops/q_per_channel_zero_points_native.h>
#include <ATen/ops/q_scale_native.h>
#include <ATen/ops/q_zero_point_native.h>
#include <ATen/ops/qscheme_native.h>
#include <ATen/ops/quantized_batch_norm_native.h>
#include <ATen/ops/quantized_max_pool1d_native.h>
#include <ATen/ops/quantized_max_pool2d_native.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/squeeze_native.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/topk_native.h>
#include <ATen/ops/unfold_native.h>
#include <ATen/ops/unsqueeze_native.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/view_native.h>

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
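// Helper used by the generated wrappers to allocate an output tensor:
// contiguous (default layout) when no strides are supplied, otherwise with
// the requested strides.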
Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  if (strides.empty()) {
    return at::empty(sizes, options);
  } else {
    return at::empty_strided(sizes, strides, options);
  }
}
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
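// Returns a freshly allocated tensor with the requested strides when the
// provided out tensor's strides do not match, and c10::nullopt otherwise
// (in which case out can be written to directly).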
c10::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  if (out.strides() != strides) {
    return at::empty_strided(sizes, strides, options);
  }
  return c10::nullopt;
}
namespace {
at::Tensor wrapper_QuantizedCPU__as_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::as_strided_qtensorimpl(self, C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride), storage_offset.has_value() ? c10::make_optional(storage_offset->expect_int()) : c10::nullopt);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__quantized_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_batch_norm(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__cat(const at::ITensorListRef & tensors, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cat_quantized_cpu(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_cat_out(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cat_out_quantized_cpu(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clamp_quantized_cpu(self, min, max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_unknown_quantized(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU___empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_affine_quantized(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU___empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_per_channel_affine_quantized(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_QuantizedCPU__resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_resize_cpu_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_quantized(size, qtensor, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_like_quantized(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_strided_unknown_quantized(C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride), dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Scalar_fill_(at::Tensor & self, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::fill_quantized_(self, value);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Tensor_fill_(at::Tensor & self, const at::Tensor & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::fill_quantized_(self, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::grid_sampler_2d_cpu(input, grid, interpolation_mode, padding_mode, align_corners);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Tensor_index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_index(self, indices);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU___index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
  // No device check
  // DeviceGuard omitted
  return at::native::_index_put_impl_quantized_cpu_(self, indices, values, accumulate, unsafe);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_QuantizedCPU_dim_max(const at::Tensor & self, int64_t dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::qmax(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__quantized_max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__quantized_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_dim_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::mean_quantized_cpu(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_mean_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mean_out_quantized_cpu(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_QuantizedCPU_dim_min(const at::Tensor & self, int64_t dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::qmin(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__channel_shuffle(const at::Tensor & self, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::channel_shuffle_quantized_cpu(self, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU___reshape_alias(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  // No device check
  // DeviceGuard omitted
  return at::native::_reshape_alias(self, C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride));
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__relu(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu_quantized_cpu(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU__relu_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu_quantized_cpu_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU___prelu_kernel(const at::Tensor & self, const at::Tensor & weight) {
  // No device check
  // DeviceGuard omitted
  return at::native::_prelu_kernel_quantized_cpu(self, weight);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__gelu(const at::Tensor & self, c10::string_view approximate) {
  // No device check
  // DeviceGuard omitted
  return at::native::gelu_quantized_cpu(self, approximate);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__sigmoid(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sigmoid_quantized_cpu(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__squeeze(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::squeeze_quantized(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_dim_squeeze(const at::Tensor & self, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::squeeze_quantized(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_dims_squeeze(const at::Tensor & self, at::IntArrayRef dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::squeeze_quantized(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_correction_std(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_quantized_cpu(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_correction_out_std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_out_quantized_cpu(self, dim, correction, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__tanh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::tanh_quantized_cpu(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::threshold_quantized_cpu(self, threshold, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__flip(const at::Tensor & self, at::IntArrayRef dims) {
  // No device check
  // DeviceGuard omitted
  return at::native::flip(self, dims);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__unsqueeze(const at::Tensor & self, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::unsqueeze_quantized(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_clone(self, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_self_dequantize(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::dequantize_quantized(self);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_QuantizedCPU_tensors_dequantize(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::dequantize_tensors_quantized_cpu(tensors);
}
} // anonymous namespace
namespace {
double wrapper_QuantizedCPU__q_scale(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::q_scale_quant(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_QuantizedCPU__q_zero_point(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::q_zero_point_quant(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__q_per_channel_scales(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::q_per_channel_scales(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__q_per_channel_zero_points(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::q_per_channel_zero_points(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_QuantizedCPU__q_per_channel_axis(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::q_per_channel_axis(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__int_repr(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::int_repr_quantized_cpu(self);
}
} // anonymous namespace
namespace {
at::QScheme wrapper_QuantizedCPU__qscheme(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::qscheme_quant(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_source_Storage_storage_offset_set_(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  // No device check
  // DeviceGuard omitted
  return at::native::set_storage_quantized_(self, source, storage_offset.expect_int(), C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Scalar_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_fill__quantized_cpu(self, mask, value);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Tensor_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_fill__quantized_cpu(self, mask, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__view(const at::Tensor & self, c10::SymIntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::view(self, C10_AS_INTARRAYREF_SLOW(size));
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Scalar_eq(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::eq_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Scalar_out_eq_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::eq_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Tensor_eq(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::eq_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Tensor_out_eq_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::eq_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Scalar_ne(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::ne_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Scalar_out_ne_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ne_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Tensor_ne(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::ne_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Tensor_out_ne_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ne_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Scalar_ge(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::ge_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Scalar_out_ge_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ge_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Tensor_ge(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::ge_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Tensor_out_ge_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ge_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Scalar_le(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::le_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Scalar_out_le_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::le_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Tensor_le(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::le_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Tensor_out_le_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::le_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Scalar_gt(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::gt_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Scalar_out_gt_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::gt_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Tensor_gt(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::gt_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Tensor_out_gt_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::gt_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Scalar_lt(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::lt_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Scalar_out_lt_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::lt_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU_Tensor_lt(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::lt_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_Tensor_out_lt_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::lt_out_quantized_cpu(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_select_quantized_cpu_(self, dim, index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_index_select_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_select_out_cpu_(self, dim, index, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__min(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::min_quantized_cpu(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__max(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_quantized_cpu(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_unary_out_max_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_quantized_unary_out(self, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_QuantizedCPU_stable_sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
  // No device check
  // DeviceGuard omitted
  return at::native::sort_quantized_cpu_stable(self, stable, dim, descending);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_QuantizedCPU__topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
  // No device check
  // DeviceGuard omitted
  return at::native::topk_quantized_cpu(self, k, dim, largest, sorted);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
  // No device check
  // DeviceGuard omitted
  return at::native::unfold(self, dimension, size, step);
}
} // anonymous namespace
namespace {
bool wrapper_QuantizedCPU__equal(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::equal_quantized_cpu(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__hardsigmoid(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::hardsigmoid_quantized_cpu(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_hardsigmoid_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::hardsigmoid_out_quantized_cpu(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__hardtanh(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
  // No device check
  // DeviceGuard omitted
  return at::native::hardtanh_quantized_cpu(self, min_val, max_val);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_hardtanh_out(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::hardtanh_out_quantized_cpu(self, min_val, max_val, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU__hardtanh_(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
  // No device check
  // DeviceGuard omitted
  return at::native::hardtanh_quantized_cpu_(self, min_val, max_val);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
  // No device check
  // DeviceGuard omitted
  return at::native::leaky_relu_quantized_cpu(self, negative_slope);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_leaky_relu_out(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::leaky_relu_out_quantized_cpu(self, negative_slope, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU__leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) {
  // No device check
  // DeviceGuard omitted
  return at::native::leaky_relu_quantized_cpu_(self, negative_slope);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU___adaptive_avg_pool2d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool3d_out_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU___adaptive_avg_pool3d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool3d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size));
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
  // No device check
  // DeviceGuard omitted
  return at::native::avg_pool2d_quantized_cpu(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
  // No device check
  // DeviceGuard omitted
  return at::native::avg_pool3d_quantized_cpu(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_reflection_pad1d_out(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::reflection_pad1d_out_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__reflection_pad2d(const at::Tensor & self, c10::SymIntArrayRef padding) {
  // No device check
  // DeviceGuard omitted
  return at::native::reflection_pad2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(padding));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_QuantizedCPU_out_reflection_pad2d_out(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::reflection_pad2d_out_cpu(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__upsample_bilinear2d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_bilinear2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__upsample_nearest2d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_nearest2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU___upsample_nearest_exact2d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  // No device check
  // DeviceGuard omitted
  return at::native::_upsample_nearest_exact2d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU__upsample_nearest3d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_nearest3d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_QuantizedCPU___upsample_nearest_exact3d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  // No device check
  // DeviceGuard omitted
  return at::native::_upsample_nearest_exact3d_quantized_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, QuantizedCPU, m) {
  m.impl("as_strided", TORCH_FN(wrapper_QuantizedCPU__as_strided));
  m.impl("quantized_batch_norm", TORCH_FN(wrapper_QuantizedCPU__quantized_batch_norm));
  m.impl("cat", TORCH_FN(wrapper_QuantizedCPU__cat));
  m.impl("cat.out", TORCH_FN(wrapper_QuantizedCPU_out_cat_out));
  m.impl("clamp", TORCH_FN(wrapper_QuantizedCPU__clamp));
  m.impl("empty.memory_format", TORCH_FN(wrapper_QuantizedCPU_memory_format_empty));
  m.impl("_empty_affine_quantized", TORCH_FN(wrapper_QuantizedCPU___empty_affine_quantized));
  m.impl("_empty_per_channel_affine_quantized", TORCH_FN(wrapper_QuantizedCPU___empty_per_channel_affine_quantized));
  m.impl("resize_", TORCH_FN(wrapper_QuantizedCPU__resize_));
  m.impl("empty_quantized", TORCH_FN(wrapper_QuantizedCPU__empty_quantized));
  m.impl("empty_like", TORCH_FN(wrapper_QuantizedCPU__empty_like));
  m.impl("empty_strided", TORCH_FN(wrapper_QuantizedCPU__empty_strided));
  m.impl("fill_.Scalar", TORCH_FN(wrapper_QuantizedCPU_Scalar_fill_));
  m.impl("fill_.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_fill_));
  m.impl("grid_sampler_2d", TORCH_FN(wrapper_QuantizedCPU__grid_sampler_2d));
  m.impl("index.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_index));
  m.impl("_index_put_impl_", TORCH_FN(wrapper_QuantizedCPU___index_put_impl_));
  m.impl("max.dim", TORCH_FN(wrapper_QuantizedCPU_dim_max));
  m.impl("quantized_max_pool1d", TORCH_FN(wrapper_QuantizedCPU__quantized_max_pool1d));
  m.impl("quantized_max_pool2d", TORCH_FN(wrapper_QuantizedCPU__quantized_max_pool2d));
  m.impl("mean.dim", TORCH_FN(wrapper_QuantizedCPU_dim_mean));
  m.impl("mean.out", TORCH_FN(wrapper_QuantizedCPU_out_mean_out));
  m.impl("min.dim", TORCH_FN(wrapper_QuantizedCPU_dim_min));
  m.impl("channel_shuffle", TORCH_FN(wrapper_QuantizedCPU__channel_shuffle));
  m.impl("_reshape_alias", TORCH_FN(wrapper_QuantizedCPU___reshape_alias));
  m.impl("relu", TORCH_FN(wrapper_QuantizedCPU__relu));
  m.impl("relu_", TORCH_FN(wrapper_QuantizedCPU__relu_));
  m.impl("_prelu_kernel", TORCH_FN(wrapper_QuantizedCPU___prelu_kernel));
  m.impl("gelu", TORCH_FN(wrapper_QuantizedCPU__gelu));
  m.impl("sigmoid", TORCH_FN(wrapper_QuantizedCPU__sigmoid));
  m.impl("squeeze", TORCH_FN(wrapper_QuantizedCPU__squeeze));
  m.impl("squeeze.dim", TORCH_FN(wrapper_QuantizedCPU_dim_squeeze));
  m.impl("squeeze.dims", TORCH_FN(wrapper_QuantizedCPU_dims_squeeze));
  m.impl("std.correction", TORCH_FN(wrapper_QuantizedCPU_correction_std));
  m.impl("std.correction_out", TORCH_FN(wrapper_QuantizedCPU_correction_out_std_out));
  m.impl("tanh", TORCH_FN(wrapper_QuantizedCPU__tanh));
  m.impl("threshold", TORCH_FN(wrapper_QuantizedCPU__threshold));
  m.impl("flip", TORCH_FN(wrapper_QuantizedCPU__flip));
  m.impl("unsqueeze", TORCH_FN(wrapper_QuantizedCPU__unsqueeze));
  m.impl("clone", TORCH_FN(wrapper_QuantizedCPU__clone));
  m.impl("dequantize.self", TORCH_FN(wrapper_QuantizedCPU_self_dequantize));
  m.impl("dequantize.tensors", TORCH_FN(wrapper_QuantizedCPU_tensors_dequantize));
  m.impl("q_scale", TORCH_FN(wrapper_QuantizedCPU__q_scale));
  m.impl("q_zero_point", TORCH_FN(wrapper_QuantizedCPU__q_zero_point));
  m.impl("q_per_channel_scales", TORCH_FN(wrapper_QuantizedCPU__q_per_channel_scales));
  m.impl("q_per_channel_zero_points", TORCH_FN(wrapper_QuantizedCPU__q_per_channel_zero_points));
  m.impl("q_per_channel_axis", TORCH_FN(wrapper_QuantizedCPU__q_per_channel_axis));
  m.impl("int_repr", TORCH_FN(wrapper_QuantizedCPU__int_repr));
  m.impl("qscheme", TORCH_FN(wrapper_QuantizedCPU__qscheme));
  m.impl("set_.source_Storage_storage_offset", TORCH_FN(wrapper_QuantizedCPU_source_Storage_storage_offset_set_));
  m.impl("masked_fill_.Scalar", TORCH_FN(wrapper_QuantizedCPU_Scalar_masked_fill_));
  m.impl("masked_fill_.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_masked_fill_));
  m.impl("view", TORCH_FN(wrapper_QuantizedCPU__view));
  m.impl("eq.Scalar", TORCH_FN(wrapper_QuantizedCPU_Scalar_eq));
  m.impl("eq.Scalar_out", TORCH_FN(wrapper_QuantizedCPU_Scalar_out_eq_out));
  m.impl("eq.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_eq));
  m.impl("eq.Tensor_out", TORCH_FN(wrapper_QuantizedCPU_Tensor_out_eq_out));
  m.impl("ne.Scalar", TORCH_FN(wrapper_QuantizedCPU_Scalar_ne));
  m.impl("ne.Scalar_out", TORCH_FN(wrapper_QuantizedCPU_Scalar_out_ne_out));
  m.impl("ne.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_ne));
  m.impl("ne.Tensor_out", TORCH_FN(wrapper_QuantizedCPU_Tensor_out_ne_out));
  m.impl("ge.Scalar", TORCH_FN(wrapper_QuantizedCPU_Scalar_ge));
  m.impl("ge.Scalar_out", TORCH_FN(wrapper_QuantizedCPU_Scalar_out_ge_out));
  m.impl("ge.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_ge));
  m.impl("ge.Tensor_out", TORCH_FN(wrapper_QuantizedCPU_Tensor_out_ge_out));
  m.impl("le.Scalar", TORCH_FN(wrapper_QuantizedCPU_Scalar_le));
  m.impl("le.Scalar_out", TORCH_FN(wrapper_QuantizedCPU_Scalar_out_le_out));
  m.impl("le.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_le));
  m.impl("le.Tensor_out", TORCH_FN(wrapper_QuantizedCPU_Tensor_out_le_out));
  m.impl("gt.Scalar", TORCH_FN(wrapper_QuantizedCPU_Scalar_gt));
  m.impl("gt.Scalar_out", TORCH_FN(wrapper_QuantizedCPU_Scalar_out_gt_out));
  m.impl("gt.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_gt));
  m.impl("gt.Tensor_out", TORCH_FN(wrapper_QuantizedCPU_Tensor_out_gt_out));
  m.impl("lt.Scalar", TORCH_FN(wrapper_QuantizedCPU_Scalar_lt));
  m.impl("lt.Scalar_out", TORCH_FN(wrapper_QuantizedCPU_Scalar_out_lt_out));
  m.impl("lt.Tensor", TORCH_FN(wrapper_QuantizedCPU_Tensor_lt));
  m.impl("lt.Tensor_out", TORCH_FN(wrapper_QuantizedCPU_Tensor_out_lt_out));
  m.impl("index_select", TORCH_FN(wrapper_QuantizedCPU__index_select));
  m.impl("index_select.out", TORCH_FN(wrapper_QuantizedCPU_out_index_select_out));
  m.impl("min", TORCH_FN(wrapper_QuantizedCPU__min));
  m.impl("max", TORCH_FN(wrapper_QuantizedCPU__max));
  m.impl("max.unary_out", TORCH_FN(wrapper_QuantizedCPU_unary_out_max_out));
  m.impl("sort.stable", TORCH_FN(wrapper_QuantizedCPU_stable_sort));
  m.impl("topk", TORCH_FN(wrapper_QuantizedCPU__topk));
  m.impl("unfold", TORCH_FN(wrapper_QuantizedCPU__unfold));
  m.impl("equal", TORCH_FN(wrapper_QuantizedCPU__equal));
  m.impl("hardsigmoid", TORCH_FN(wrapper_QuantizedCPU__hardsigmoid));
  m.impl("hardsigmoid.out", TORCH_FN(wrapper_QuantizedCPU_out_hardsigmoid_out));
  m.impl("hardtanh", TORCH_FN(wrapper_QuantizedCPU__hardtanh));
  m.impl("hardtanh.out", TORCH_FN(wrapper_QuantizedCPU_out_hardtanh_out));
  m.impl("hardtanh_", TORCH_FN(wrapper_QuantizedCPU__hardtanh_));
  m.impl("leaky_relu", TORCH_FN(wrapper_QuantizedCPU__leaky_relu));
  m.impl("leaky_relu.out", TORCH_FN(wrapper_QuantizedCPU_out_leaky_relu_out));
  m.impl("leaky_relu_", TORCH_FN(wrapper_QuantizedCPU__leaky_relu_));
  m.impl("_adaptive_avg_pool2d", TORCH_FN(wrapper_QuantizedCPU___adaptive_avg_pool2d));
  m.impl("adaptive_avg_pool3d.out", TORCH_FN(wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out));
  m.impl("_adaptive_avg_pool3d", TORCH_FN(wrapper_QuantizedCPU___adaptive_avg_pool3d));
  m.impl("avg_pool2d", TORCH_FN(wrapper_QuantizedCPU__avg_pool2d));
  m.impl("avg_pool3d", TORCH_FN(wrapper_QuantizedCPU__avg_pool3d));
  m.impl("reflection_pad1d.out", TORCH_FN(wrapper_QuantizedCPU_out_reflection_pad1d_out));
  m.impl("reflection_pad2d", TORCH_FN(wrapper_QuantizedCPU__reflection_pad2d));
  m.impl("reflection_pad2d.out", TORCH_FN(wrapper_QuantizedCPU_out_reflection_pad2d_out));
  m.impl("upsample_bilinear2d", TORCH_FN(wrapper_QuantizedCPU__upsample_bilinear2d));
  m.impl("upsample_nearest2d", TORCH_FN(wrapper_QuantizedCPU__upsample_nearest2d));
  m.impl("_upsample_nearest_exact2d", TORCH_FN(wrapper_QuantizedCPU___upsample_nearest_exact2d));
  m.impl("upsample_nearest3d", TORCH_FN(wrapper_QuantizedCPU__upsample_nearest3d));
  m.impl("_upsample_nearest_exact3d", TORCH_FN(wrapper_QuantizedCPU___upsample_nearest_exact3d));
};
} // anonymous namespace
1146namespace quantizedcpu {
1147at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) {
1148return wrapper_QuantizedCPU__as_strided(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
1149}
1150at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
1151return wrapper_QuantizedCPU__as_strided(self, size, stride, storage_offset);
1152}
1153at::Tensor quantized_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
1154return wrapper_QuantizedCPU__quantized_batch_norm(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
1155}
1156at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim) {
1157return wrapper_QuantizedCPU__cat(tensors, dim);
1158}
1159at::Tensor & cat_out(at::Tensor & out, const at::ITensorListRef & tensors, int64_t dim) {
1160return wrapper_QuantizedCPU_out_cat_out(tensors, dim, out);
1161}
1162at::Tensor & cat_outf(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
1163return wrapper_QuantizedCPU_out_cat_out(tensors, dim, out);
1164}
1165at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
1166return wrapper_QuantizedCPU__clamp(self, min, max);
1167}
1168at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1169return wrapper_QuantizedCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1170}
1171at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1172return wrapper_QuantizedCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
1173}
1174at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1175return wrapper_QuantizedCPU_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1176}
1177at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1178return wrapper_QuantizedCPU_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
1179}
1180at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
1181return wrapper_QuantizedCPU___empty_affine_quantized(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1182}
1183at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
1184return wrapper_QuantizedCPU___empty_affine_quantized(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
1185}
1186at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
1187return wrapper_QuantizedCPU___empty_per_channel_affine_quantized(size, scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
1188}
1189at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
1190return wrapper_QuantizedCPU___empty_per_channel_affine_quantized(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
1191}
1192const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
1193return wrapper_QuantizedCPU__resize_(self, c10::fromIntArrayRefSlow(size), memory_format);
1194}
1195const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
1196return wrapper_QuantizedCPU__resize_(self, size, memory_format);
1197}
at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_QuantizedCPU__empty_quantized(size, qtensor, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_QuantizedCPU__empty_quantized(size, qtensor, dtype, layout, device, pin_memory, memory_format);
}
at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_QuantizedCPU__empty_like(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_QuantizedCPU__empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options) {
return wrapper_QuantizedCPU__empty_strided(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_QuantizedCPU__empty_strided(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory);
}
at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) {
return wrapper_QuantizedCPU__empty_strided(size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_QuantizedCPU__empty_strided(size, stride, dtype, layout, device, pin_memory);
}
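// Factory-style ops above also come in a gathered-TensorOptions spelling and a
// scattered spelling. The gathered overload unpacks `options` via
// optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(),
// options.device_opt(), and options.pinned_memory_opt(), and validates the
// memory format with c10::impl::check_tensor_options_and_extract_memory_format;
// the scattered overload forwards its optionals directly. For example, both
// empty_like overloads end up in the same wrapper_QuantizedCPU__empty_like call.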
at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) {
return wrapper_QuantizedCPU_Scalar_fill_(self, value);
}
at::Tensor & fill_(at::Tensor & self, const at::Tensor & value) {
return wrapper_QuantizedCPU_Tensor_fill_(self, value);
}
at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
return wrapper_QuantizedCPU__grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners);
}
at::Tensor index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
return wrapper_QuantizedCPU_Tensor_index(self, indices);
}
at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
return wrapper_QuantizedCPU___index_put_impl_(self, indices, values, accumulate, unsafe);
}
::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_QuantizedCPU_dim_max(self, dim, keepdim);
}
at::Tensor quantized_max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_QuantizedCPU__quantized_max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor quantized_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_QuantizedCPU__quantized_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_QuantizedCPU_dim_mean(self, dim, keepdim, dtype);
}
at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_QuantizedCPU_out_mean_out(self, dim, keepdim, dtype, out);
}
at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_QuantizedCPU_out_mean_out(self, dim, keepdim, dtype, out);
}
::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_QuantizedCPU_dim_min(self, dim, keepdim);
}
at::Tensor channel_shuffle(const at::Tensor & self, int64_t groups) {
return wrapper_QuantizedCPU__channel_shuffle(self, groups);
}
at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
return wrapper_QuantizedCPU___reshape_alias(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
}
at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
return wrapper_QuantizedCPU___reshape_alias(self, size, stride);
}
at::Tensor relu(const at::Tensor & self) {
return wrapper_QuantizedCPU__relu(self);
}
at::Tensor & relu_(at::Tensor & self) {
return wrapper_QuantizedCPU__relu_(self);
}
at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight) {
return wrapper_QuantizedCPU___prelu_kernel(self, weight);
}
at::Tensor gelu(const at::Tensor & self, c10::string_view approximate) {
return wrapper_QuantizedCPU__gelu(self, approximate);
}
at::Tensor sigmoid(const at::Tensor & self) {
return wrapper_QuantizedCPU__sigmoid(self);
}
at::Tensor squeeze(const at::Tensor & self) {
return wrapper_QuantizedCPU__squeeze(self);
}
at::Tensor squeeze(const at::Tensor & self, int64_t dim) {
return wrapper_QuantizedCPU_dim_squeeze(self, dim);
}
at::Tensor squeeze(const at::Tensor & self, at::IntArrayRef dim) {
return wrapper_QuantizedCPU_dims_squeeze(self, dim);
}
at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
return wrapper_QuantizedCPU_correction_std(self, dim, correction, keepdim);
}
at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
return wrapper_QuantizedCPU_correction_out_std_out(self, dim, correction, keepdim, out);
}
at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
return wrapper_QuantizedCPU_correction_out_std_out(self, dim, correction, keepdim, out);
}
at::Tensor tanh(const at::Tensor & self) {
return wrapper_QuantizedCPU__tanh(self);
}
at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
return wrapper_QuantizedCPU__threshold(self, threshold, value);
}
at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims) {
return wrapper_QuantizedCPU__flip(self, dims);
}
at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) {
return wrapper_QuantizedCPU__unsqueeze(self, dim);
}
at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
return wrapper_QuantizedCPU__clone(self, memory_format);
}
at::Tensor dequantize(const at::Tensor & self) {
return wrapper_QuantizedCPU_self_dequantize(self);
}
::std::vector<at::Tensor> dequantize(at::TensorList tensors) {
return wrapper_QuantizedCPU_tensors_dequantize(tensors);
}
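// Quantization-parameter accessors: q_scale and q_zero_point report the affine
// parameters of a per-tensor quantized tensor, the q_per_channel_* accessors
// report the per-channel scales, zero points, and quantization axis, int_repr
// returns the underlying integer representation, and qscheme reports the
// quantization scheme.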
double q_scale(const at::Tensor & self) {
return wrapper_QuantizedCPU__q_scale(self);
}
int64_t q_zero_point(const at::Tensor & self) {
return wrapper_QuantizedCPU__q_zero_point(self);
}
at::Tensor q_per_channel_scales(const at::Tensor & self) {
return wrapper_QuantizedCPU__q_per_channel_scales(self);
}
at::Tensor q_per_channel_zero_points(const at::Tensor & self) {
return wrapper_QuantizedCPU__q_per_channel_zero_points(self);
}
int64_t q_per_channel_axis(const at::Tensor & self) {
return wrapper_QuantizedCPU__q_per_channel_axis(self);
}
at::Tensor int_repr(const at::Tensor & self) {
return wrapper_QuantizedCPU__int_repr(self);
}
at::QScheme qscheme(const at::Tensor & self) {
return wrapper_QuantizedCPU__qscheme(self);
}
at::Tensor & set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) {
return wrapper_QuantizedCPU_source_Storage_storage_offset_set_(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
}
at::Tensor & set__symint(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
return wrapper_QuantizedCPU_source_Storage_storage_offset_set_(self, source, storage_offset, size, stride);
}
at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
return wrapper_QuantizedCPU_Scalar_masked_fill_(self, mask, value);
}
at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
return wrapper_QuantizedCPU_Tensor_masked_fill_(self, mask, value);
}
at::Tensor view(const at::Tensor & self, at::IntArrayRef size) {
return wrapper_QuantizedCPU__view(self, c10::fromIntArrayRefSlow(size));
}
at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
return wrapper_QuantizedCPU__view(self, size);
}
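// Comparison ops below (eq/ne/ge/le/gt/lt) follow a uniform pattern: a Scalar
// and a Tensor variant for the functional form, plus two equivalent out
// spellings (*_out takes `out` first, *_outf takes `out` last) that both
// forward to the same out-variant wrapper. For example,
// eq_out(out, self, other) and eq_outf(self, other, out) are interchangeable.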
at::Tensor eq(const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_eq(self, other);
}
at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_out_eq_out(self, other, out);
}
at::Tensor & eq_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Scalar_out_eq_out(self, other, out);
}
at::Tensor eq(const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_eq(self, other);
}
at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_out_eq_out(self, other, out);
}
at::Tensor & eq_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Tensor_out_eq_out(self, other, out);
}
at::Tensor ne(const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_ne(self, other);
}
at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_out_ne_out(self, other, out);
}
at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Scalar_out_ne_out(self, other, out);
}
at::Tensor ne(const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_ne(self, other);
}
at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_out_ne_out(self, other, out);
}
at::Tensor & ne_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Tensor_out_ne_out(self, other, out);
}
at::Tensor ge(const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_ge(self, other);
}
at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_out_ge_out(self, other, out);
}
at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Scalar_out_ge_out(self, other, out);
}
at::Tensor ge(const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_ge(self, other);
}
at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_out_ge_out(self, other, out);
}
at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Tensor_out_ge_out(self, other, out);
}
at::Tensor le(const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_le(self, other);
}
at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_out_le_out(self, other, out);
}
at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Scalar_out_le_out(self, other, out);
}
at::Tensor le(const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_le(self, other);
}
at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_out_le_out(self, other, out);
}
at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Tensor_out_le_out(self, other, out);
}
at::Tensor gt(const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_gt(self, other);
}
at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_out_gt_out(self, other, out);
}
at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Scalar_out_gt_out(self, other, out);
}
at::Tensor gt(const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_gt(self, other);
}
at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_out_gt_out(self, other, out);
}
at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Tensor_out_gt_out(self, other, out);
}
at::Tensor lt(const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_lt(self, other);
}
at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_QuantizedCPU_Scalar_out_lt_out(self, other, out);
}
at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Scalar_out_lt_out(self, other, out);
}
at::Tensor lt(const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_lt(self, other);
}
at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU_Tensor_out_lt_out(self, other, out);
}
at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_QuantizedCPU_Tensor_out_lt_out(self, other, out);
}
at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
return wrapper_QuantizedCPU__index_select(self, dim, index);
}
at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) {
return wrapper_QuantizedCPU_out_index_select_out(self, dim, index, out);
}
at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
return wrapper_QuantizedCPU_out_index_select_out(self, dim, index, out);
}
at::Tensor min(const at::Tensor & self) {
return wrapper_QuantizedCPU__min(self);
}
at::Tensor max(const at::Tensor & self) {
return wrapper_QuantizedCPU__max(self);
}
at::Tensor & max_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_QuantizedCPU_unary_out_max_out(self, out);
}
at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_QuantizedCPU_unary_out_max_out(self, out);
}
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
return wrapper_QuantizedCPU_stable_sort(self, stable, dim, descending);
}
::std::tuple<at::Tensor,at::Tensor> topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
return wrapper_QuantizedCPU__topk(self, k, dim, largest, sorted);
}
at::Tensor unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
return wrapper_QuantizedCPU__unfold(self, dimension, size, step);
}
bool equal(const at::Tensor & self, const at::Tensor & other) {
return wrapper_QuantizedCPU__equal(self, other);
}
at::Tensor hardsigmoid(const at::Tensor & self) {
return wrapper_QuantizedCPU__hardsigmoid(self);
}
at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_QuantizedCPU_out_hardsigmoid_out(self, out);
}
at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_QuantizedCPU_out_hardsigmoid_out(self, out);
}
at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_QuantizedCPU__hardtanh(self, min_val, max_val);
}
at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_QuantizedCPU_out_hardtanh_out(self, min_val, max_val, out);
}
at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
return wrapper_QuantizedCPU_out_hardtanh_out(self, min_val, max_val, out);
}
at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_QuantizedCPU__hardtanh_(self, min_val, max_val);
}
at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_QuantizedCPU__leaky_relu(self, negative_slope);
}
at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_QuantizedCPU_out_leaky_relu_out(self, negative_slope, out);
}
at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
return wrapper_QuantizedCPU_out_leaky_relu_out(self, negative_slope, out);
}
at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_QuantizedCPU__leaky_relu_(self, negative_slope);
}
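// The remaining entries cover spatial ops: adaptive and average pooling,
// reflection padding, and bilinear/nearest upsampling. As above, IntArrayRef
// output sizes and paddings are widened with c10::fromIntArrayRefSlow, and the
// *_symint and out spellings forward to the same wrappers.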
at::Tensor _adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_QuantizedCPU___adaptive_avg_pool2d(self, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor _adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_QuantizedCPU___adaptive_avg_pool2d(self, output_size);
}
at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(self, output_size, out);
}
at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
return wrapper_QuantizedCPU_out_adaptive_avg_pool3d_out(self, output_size, out);
}
at::Tensor _adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_QuantizedCPU___adaptive_avg_pool3d(self, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor _adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_QuantizedCPU___adaptive_avg_pool3d(self, output_size);
}
at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_QuantizedCPU__avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_QuantizedCPU__avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_QuantizedCPU_out_reflection_pad1d_out(self, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & reflection_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_QuantizedCPU_out_reflection_pad1d_out(self, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & reflection_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_QuantizedCPU_out_reflection_pad1d_out(self, padding, out);
}
at::Tensor & reflection_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_QuantizedCPU_out_reflection_pad1d_out(self, padding, out);
}
at::Tensor reflection_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_QuantizedCPU__reflection_pad2d(self, c10::fromIntArrayRefSlow(padding));
}
at::Tensor reflection_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_QuantizedCPU__reflection_pad2d(self, padding);
}
at::Tensor & reflection_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_QuantizedCPU_out_reflection_pad2d_out(self, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & reflection_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_QuantizedCPU_out_reflection_pad2d_out(self, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & reflection_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_QuantizedCPU_out_reflection_pad2d_out(self, padding, out);
}
at::Tensor & reflection_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_QuantizedCPU_out_reflection_pad2d_out(self, padding, out);
}
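// Upsampling ops accept an explicit output_size together with optional
// per-dimension scale factors (scales_h/scales_w, plus scales_d for the 3d
// variants); both are forwarded unchanged to the wrapper.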
at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU__upsample_bilinear2d(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
}
at::Tensor upsample_bilinear2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU__upsample_bilinear2d(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU__upsample_nearest2d(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w);
}
at::Tensor upsample_nearest2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU__upsample_nearest2d(self, output_size, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU___upsample_nearest_exact2d(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU___upsample_nearest_exact2d(self, output_size, scales_h, scales_w);
}
at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU__upsample_nearest3d(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU__upsample_nearest3d(self, output_size, scales_d, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU___upsample_nearest_exact3d(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_QuantizedCPU___upsample_nearest_exact3d(self, output_size, scales_d, scales_h, scales_w);
}
} // namespace quantizedcpu
} // namespace at