// clang-format off
#pragma once
#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
// @generated by torchgen/gen.py from Functions.h

#include <ATen/Functions.h>
#include "CustomOpsNativeFunctions.h"

namespace torch {
namespace executor {


namespace aten {
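
// Each function below is a thin inline wrapper: it forwards its arguments,
// unchanged, to the at:: out-variant whose schema is quoted in the comment
// directly above it, presumably so callers in the torch::executor namespace
// can use these operators by their _outf names without touching at:: directly.
//
// Illustrative call (a sketch only; the shapes and values are made up):
//
//   at::Tensor a = at::ones({2, 2});
//   at::Tensor b = at::ones({2, 2});
//   at::Tensor out = at::empty({2, 2});
//   torch::executor::aten::add_outf(a, b, /*alpha=*/1, out);  // out = a + b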

// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    return at::add_outf(self, other, alpha, out);
}


// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & baddbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    return at::baddbmm_outf(self, batch1, batch2, beta, alpha, out);
}


// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    return at::bmm_outf(self, mat2, out);
}


// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & cat_outf(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
    return at::cat_outf(tensors, dim, out);
}


// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
    return at::clamp_outf(self, min, max, out);
}


// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & cumsum_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return at::cumsum_outf(self, dim, dtype, out);
}


// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return at::div_outf(self, other, out);
}


// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & exp_outf(const at::Tensor & self, at::Tensor & out) {
    return at::exp_outf(self, out);
}


// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return at::floor_divide_outf(self, other, out);
}


// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & index_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out) {
    return at::index_outf(self, indices, out);
}


// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return at::mean_outf(self, dim, keepdim, dtype, out);
}


// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    return at::mm_outf(self, mat2, out);
}


// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return at::mul_outf(self, other, out);
}


// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    return at::native_batch_norm_outf(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}


// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
    return at::round_outf(self, out);
}


// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
    return at::gelu_outf(self, approximate, out);
}


// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & sigmoid_outf(const at::Tensor & self, at::Tensor & out) {
    return at::sigmoid_outf(self, out);
}


// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & logit_outf(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
    return at::logit_outf(self, eps, out);
}


// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & _softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    return at::_softmax_outf(self, dim, half_to_float, out);
}


// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & stack_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    return at::stack_outf(tensors, dim, out);
}


// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & sum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return at::sum_outf(self, dim, keepdim, dtype, out);
}


// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
    return at::tanh_outf(self, out);
}


// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    return at::sub_outf(self, other, alpha, out);
}


// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    return at::addmm_outf(self, mat1, mat2, beta, alpha, out);
}


// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return at::bitwise_and_outf(self, other, out);
}


// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return at::ne_outf(self, other, out);
}


// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & eq_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return at::eq_outf(self, other, out);
}


// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & eq_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return at::eq_outf(self, other, out);
}


// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return at::gt_outf(self, other, out);
}


// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
    return at::index_select_outf(self, dim, index, out);
}


// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & nonzero_outf(const at::Tensor & self, at::Tensor & out) {
    return at::nonzero_outf(self, out);
}


// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & remainder_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return at::remainder_outf(self, other, out);
}


// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) {
    return at::max_outf(self, out);
}


// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & minimum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return at::minimum_outf(self, other, out);
}


// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    return at::sort_outf(self, dim, descending, values, indices);
}


// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> topk_outf(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {
    return at::topk_outf(self, k, dim, largest, sorted, values, indices);
}


// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
    return at::leaky_relu_outf(self, negative_slope, out);
}


// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & softplus_outf(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
    return at::softplus_outf(self, beta, threshold, out);
}


// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
    return at::avg_pool2d_outf(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}


// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
    return at::max_pool2d_with_indices_outf(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}


// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & upsample_nearest2d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
    return at::upsample_nearest2d_outf(self, output_size, scales_h, scales_w, out);
}


// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
    return at::linalg_inv_ex_outf(A, check_errors, inverse, info);
}


// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
TORCH_API inline void unbind_copy_outf(const at::Tensor & self, int64_t dim, at::TensorList out) {
    return at::unbind_copy_outf(self, dim, out);
}


// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
TORCH_API inline void split_copy_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
    return at::split_copy_outf(self, split_size, dim, out);
}


// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
TORCH_API inline void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    return at::split_with_sizes_copy_outf(self, split_sizes, dim, out);
}


// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
    return at::constant_pad_nd_outf(self, pad, value, out);
}


// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
    return at::convolution_outf(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}


// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & embedding_outf(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
    return at::embedding_outf(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}


// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & grid_sampler_2d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    return at::grid_sampler_2d_outf(input, grid, interpolation_mode, padding_mode, align_corners, out);
}


// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & index_put_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
    return at::index_put_outf(self, indices, values, accumulate, out);
}


// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_outf(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    return at::native_layer_norm_outf(input, normalized_shape, weight, bias, eps, out0, out1, out2);
}


// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & pixel_shuffle_outf(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {
    return at::pixel_shuffle_outf(self, upscale_factor, out);
}


// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & repeat_outf(const at::Tensor & self, at::IntArrayRef repeats, at::Tensor & out) {
    return at::repeat_outf(self, repeats, out);
}


// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & relu_outf(const at::Tensor & self, at::Tensor & out) {
    return at::relu_outf(self, out);
}


// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
TORCH_API inline void unsafe_split_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
    return at::unsafe_split_outf(self, split_size, dim, out);
}


// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_outf(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    return at::_unique2_outf(self, sorted, return_inverse, return_counts, out0, out1, out2);
}


// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & zeros_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return at::zeros_like_outf(self, memory_format, out);
}


// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & clone_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return at::clone_outf(self, memory_format, out);
}


// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & rsub_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    return at::rsub_outf(self, other, alpha, out);
}


// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    return at::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
}


// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & _to_copy_outf(const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return at::_to_copy_outf(self, non_blocking, memory_format, out);
}


// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
    return at::masked_fill_outf(self, mask, value, out);
}


// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & expand_copy_outf(const at::Tensor & self, at::IntArrayRef size, bool implicit, at::Tensor & out) {
    return at::expand_copy_outf(self, size, implicit, out);
}


// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & permute_copy_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
    return at::permute_copy_outf(self, dims, out);
}


// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
    return at::_reshape_alias_copy_outf(self, size, stride, out);
}


// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & select_copy_outf(const at::Tensor & self, int64_t dim, int64_t index, at::Tensor & out) {
    return at::select_copy_outf(self, dim, index, out);
}


// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & detach_copy_outf(const at::Tensor & self, at::Tensor & out) {
    return at::detach_copy_outf(self, out);
}


// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
    return at::slice_copy_outf(self, dim, start, end, step, out);
}


// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & transpose_copy_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
    return at::transpose_copy_outf(self, dim0, dim1, out);
}


// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & unsqueeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
    return at::unsqueeze_copy_outf(self, dim, out);
}


// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & view_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
    return at::view_copy_outf(self, size, out);
}

} // namespace aten

namespace custom {
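
// Custom (non-ATen) operators. Unlike the wrappers above, these forward to
// the native implementations declared in "CustomOpsNativeFunctions.h"
// (the ::custom::native namespace).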

// custom::add_3.out(Tensor a, Tensor b, Tensor c, *, Tensor(a!) out) -> Tensor(a!)
TORCH_API inline at::Tensor & add_3_outf(const at::Tensor & a, const at::Tensor & b, const at::Tensor & c, at::Tensor & out) {
    return ::custom::native::add_3_out(a, b, c, out);
}

} // namespace custom

} // namespace executor
} // namespace torch