1 | #include <operator_registry.h> |
2 | #include "Functions.h" |
3 | |
4 | namespace torch { |
5 | namespace executor { |
6 | |
7 | namespace { |
// Convenience alias for a non-owning view over the Operator table below.
using OpArrayRef = ::at::ArrayRef<::torch::executor::Operator>;
9 | |
10 | static Operator operators_to_register[] = { |
11 | |
    // Each Operator pairs an ATen-style schema name with an unboxing thunk.
    // The thunk reads its arguments from fixed EValue stack slots (slot order
    // follows the op schema), converts each to its native type, and invokes the
    // matching torch::executor::aten::*_outf kernel, which writes its result
    // into the caller-provided out tensor(s).

    // aten::add.out(self, other, alpha, out)
    Operator(
        "aten::add.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& alpha = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & other_base = other.to<at::Tensor>();
            const at::Scalar & alpha_base = alpha.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_add.out");
            torch::executor::aten::add_outf(self_base, other_base, alpha_base, out_base);
        }
    ),

    // aten::baddbmm.out(self, batch1, batch2, beta, alpha, out)
    Operator(
        "aten::baddbmm.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& batch1 = *stack[1];
            EValue& batch2 = *stack[2];
            EValue& beta = *stack[3];
            EValue& alpha = *stack[4];
            EValue& out = *stack[5];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & batch1_base = batch1.to<at::Tensor>();
            const at::Tensor & batch2_base = batch2.to<at::Tensor>();
            const at::Scalar & beta_base = beta.to<at::Scalar>();
            const at::Scalar & alpha_base = alpha.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_baddbmm.out");
            torch::executor::aten::baddbmm_outf(self_base, batch1_base, batch2_base, beta_base, alpha_base, out_base);
        }
    ),

    // aten::bmm.out(self, mat2, out)
    Operator(
        "aten::bmm.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& mat2 = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & mat2_base = mat2.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_bmm.out");
            torch::executor::aten::bmm_outf(self_base, mat2_base, out_base);
        }
    ),

    // aten::cat.out(tensors, dim, out)
    Operator(
        "aten::cat.out",
        [](EValue** stack) {
            EValue& tensors = *stack[0];
            EValue& dim = *stack[1];
            EValue& out = *stack[2];

            // NOTE: cat takes its tensor list as at::ITensorListRef here, unlike
            // stack.out below which uses at::TensorList — both come from the
            // same toTensorList() accessor.
            at::ITensorListRef tensors_list_out = tensors.toTensorList();

            int64_t dim_base = dim.to<int64_t>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_cat.out");
            torch::executor::aten::cat_outf(tensors_list_out, dim_base, out_base);
        }
    ),

    // aten::clamp.out(self, min?, max?, out) — min/max are optional Scalars.
    Operator(
        "aten::clamp.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& min = *stack[1];
            EValue& max = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();

            c10::optional<at::Scalar> min_opt_out = min.toOptional<at::Scalar>();

            c10::optional<at::Scalar> max_opt_out = max.toOptional<at::Scalar>();

            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_clamp.out");
            torch::executor::aten::clamp_outf(self_base, min_opt_out, max_opt_out, out_base);
        }
    ),

    // aten::cumsum.out(self, dim, dtype?, out)
    Operator(
        "aten::cumsum.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& dim = *stack[1];
            EValue& dtype = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();
            int64_t dim_base = dim.to<int64_t>();

            c10::optional<at::ScalarType> dtype_opt_out = dtype.toOptional<at::ScalarType>();

            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_cumsum.out");
            torch::executor::aten::cumsum_outf(self_base, dim_base, dtype_opt_out, out_base);
        }
    ),

    // aten::div.out(self, other, out)
    Operator(
        "aten::div.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & other_base = other.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_div.out");
            torch::executor::aten::div_outf(self_base, other_base, out_base);
        }
    ),
150 | |
    // aten::exp.out(self, out)
    Operator(
        "aten::exp.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& out = *stack[1];
            const at::Tensor & self_base = self.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_exp.out");
            torch::executor::aten::exp_outf(self_base, out_base);
        }
    ),

    // aten::floor_divide.out(self, other, out)
    Operator(
        "aten::floor_divide.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & other_base = other.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_floor_divide.out");
            torch::executor::aten::floor_divide_outf(self_base, other_base, out_base);
        }
    ),

    // aten::index.Tensor_out(self, indices, out) — indices is a list of
    // optional Tensors; its representation differs between the full ATen
    // build and the lean executor build, hence the #ifdef.
    Operator(
        "aten::index.Tensor_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& indices = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();

#ifdef USE_ATEN_LIB
            // ATen build: the kernel wants a c10::List, so copy the elements
            // out of the ArrayRef view.
            at::ArrayRef<c10::optional<at::Tensor>> indices_list_in = indices.toListOptionalTensor();
            c10::List<c10::optional<at::Tensor>> indices_list_out;
            for (auto indices_elem: indices_list_in) {
                indices_list_out.push_back(indices_elem);
            }
#else
            // Lean build: pass the non-owning ArrayRef view straight through.
            torch::executor::ArrayRef<torch::executor::optional<torch::executor::Tensor>> indices_list_out = indices.toListOptionalTensor();
#endif

            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_index.Tensor_out");
            torch::executor::aten::index_outf(self_base, indices_list_out, out_base);
        }
    ),

    // aten::mean.out(self, dim?, keepdim, dtype?, out)
    Operator(
        "aten::mean.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& dim = *stack[1];
            EValue& keepdim = *stack[2];
            EValue& dtype = *stack[3];
            EValue& out = *stack[4];
            const at::Tensor & self_base = self.to<at::Tensor>();

            at::OptionalIntArrayRef dim_opt_out = dim.toOptional<at::IntArrayRef>();

            bool keepdim_base = keepdim.to<bool>();

            c10::optional<at::ScalarType> dtype_opt_out = dtype.toOptional<at::ScalarType>();

            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_mean.out");
            torch::executor::aten::mean_outf(self_base, dim_opt_out, keepdim_base, dtype_opt_out, out_base);
        }
    ),

    // aten::mm.out(self, mat2, out)
    Operator(
        "aten::mm.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& mat2 = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & mat2_base = mat2.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_mm.out");
            torch::executor::aten::mm_outf(self_base, mat2_base, out_base);
        }
    ),

    // aten::mul.out(self, other, out)
    Operator(
        "aten::mul.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & other_base = other.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_mul.out");
            torch::executor::aten::mul_outf(self_base, other_base, out_base);
        }
    ),

    // aten::native_batch_norm.out(input, weight?, bias?, running_mean?,
    // running_var?, training, momentum, eps, out, save_mean, save_invstd)
    // — three out tensors: the result plus saved mean/invstd.
    Operator(
        "aten::native_batch_norm.out",
        [](EValue** stack) {
            EValue& input = *stack[0];
            EValue& weight = *stack[1];
            EValue& bias = *stack[2];
            EValue& running_mean = *stack[3];
            EValue& running_var = *stack[4];
            EValue& training = *stack[5];
            EValue& momentum = *stack[6];
            EValue& eps = *stack[7];
            EValue& out = *stack[8];
            EValue& save_mean = *stack[9];
            EValue& save_invstd = *stack[10];
            const at::Tensor & input_base = input.to<at::Tensor>();

            c10::optional<at::Tensor> weight_opt_out = weight.toOptional<at::Tensor>();

            c10::optional<at::Tensor> bias_opt_out = bias.toOptional<at::Tensor>();

            c10::optional<at::Tensor> running_mean_opt_out = running_mean.toOptional<at::Tensor>();

            c10::optional<at::Tensor> running_var_opt_out = running_var.toOptional<at::Tensor>();

            bool training_base = training.to<bool>();
            double momentum_base = momentum.to<double>();
            double eps_base = eps.to<double>();
            at::Tensor & out_base = out.to<at::Tensor>();
            at::Tensor & save_mean_base = save_mean.to<at::Tensor>();
            at::Tensor & save_invstd_base = save_invstd.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_native_batch_norm.out");
            torch::executor::aten::native_batch_norm_outf(input_base, weight_opt_out, bias_opt_out, running_mean_opt_out, running_var_opt_out, training_base, momentum_base, eps_base, out_base, save_mean_base, save_invstd_base);
        }
    ),

    // aten::round.out(self, out)
    Operator(
        "aten::round.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& out = *stack[1];
            const at::Tensor & self_base = self.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_round.out");
            torch::executor::aten::round_outf(self_base, out_base);
        }
    ),
324 | |
    // aten::gelu.out(self, approximate, out) — approximate is a string
    // selector (e.g. "none"/"tanh"), passed through as a string_view.
    Operator(
        "aten::gelu.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& approximate = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            c10::string_view approximate_base = approximate.to<c10::string_view>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_gelu.out");
            torch::executor::aten::gelu_outf(self_base, approximate_base, out_base);
        }
    ),

    // aten::sigmoid.out(self, out)
    Operator(
        "aten::sigmoid.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& out = *stack[1];
            const at::Tensor & self_base = self.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_sigmoid.out");
            torch::executor::aten::sigmoid_outf(self_base, out_base);
        }
    ),

    // aten::logit.out(self, eps?, out)
    Operator(
        "aten::logit.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& eps = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();

            c10::optional<double> eps_opt_out = eps.toOptional<double>();

            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_logit.out");
            torch::executor::aten::logit_outf(self_base, eps_opt_out, out_base);
        }
    ),

    // aten::_softmax.out(self, dim, half_to_float, out)
    Operator(
        "aten::_softmax.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& dim = *stack[1];
            EValue& half_to_float = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();
            int64_t dim_base = dim.to<int64_t>();
            bool half_to_float_base = half_to_float.to<bool>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call__softmax.out");
            torch::executor::aten::_softmax_outf(self_base, dim_base, half_to_float_base, out_base);
        }
    ),

    // aten::stack.out(tensors, dim, out)
    Operator(
        "aten::stack.out",
        [](EValue** stack) {
            EValue& tensors = *stack[0];
            EValue& dim = *stack[1];
            EValue& out = *stack[2];

            at::TensorList tensors_list_out = tensors.toTensorList();

            int64_t dim_base = dim.to<int64_t>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_stack.out");
            torch::executor::aten::stack_outf(tensors_list_out, dim_base, out_base);
        }
    ),

    // aten::sum.IntList_out(self, dim?, keepdim, dtype?, out)
    Operator(
        "aten::sum.IntList_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& dim = *stack[1];
            EValue& keepdim = *stack[2];
            EValue& dtype = *stack[3];
            EValue& out = *stack[4];
            const at::Tensor & self_base = self.to<at::Tensor>();

            at::OptionalIntArrayRef dim_opt_out = dim.toOptional<at::IntArrayRef>();

            bool keepdim_base = keepdim.to<bool>();

            c10::optional<at::ScalarType> dtype_opt_out = dtype.toOptional<at::ScalarType>();

            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_sum.IntList_out");
            torch::executor::aten::sum_outf(self_base, dim_opt_out, keepdim_base, dtype_opt_out, out_base);
        }
    ),

    // aten::tanh.out(self, out)
    Operator(
        "aten::tanh.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& out = *stack[1];
            const at::Tensor & self_base = self.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_tanh.out");
            torch::executor::aten::tanh_outf(self_base, out_base);
        }
    ),

    // aten::sub.out(self, other, alpha, out)
    Operator(
        "aten::sub.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& alpha = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & other_base = other.to<at::Tensor>();
            const at::Scalar & alpha_base = alpha.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_sub.out");
            torch::executor::aten::sub_outf(self_base, other_base, alpha_base, out_base);
        }
    ),

    // aten::addmm.out(self, mat1, mat2, beta, alpha, out)
    Operator(
        "aten::addmm.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& mat1 = *stack[1];
            EValue& mat2 = *stack[2];
            EValue& beta = *stack[3];
            EValue& alpha = *stack[4];
            EValue& out = *stack[5];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & mat1_base = mat1.to<at::Tensor>();
            const at::Tensor & mat2_base = mat2.to<at::Tensor>();
            const at::Scalar & beta_base = beta.to<at::Scalar>();
            const at::Scalar & alpha_base = alpha.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_addmm.out");
            torch::executor::aten::addmm_outf(self_base, mat1_base, mat2_base, beta_base, alpha_base, out_base);
        }
    ),
495 | |
    // aten::bitwise_and.Tensor_out(self, other, out)
    Operator(
        "aten::bitwise_and.Tensor_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & other_base = other.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_bitwise_and.Tensor_out");
            torch::executor::aten::bitwise_and_outf(self_base, other_base, out_base);
        }
    ),

    // aten::ne.Scalar_out(self, other, out) — `other` is a Scalar overload.
    Operator(
        "aten::ne.Scalar_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Scalar & other_base = other.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_ne.Scalar_out");
            torch::executor::aten::ne_outf(self_base, other_base, out_base);
        }
    ),

    // aten::eq.Scalar_out(self, other, out) — Scalar overload; dispatches to
    // the eq_outf overload taking at::Scalar.
    Operator(
        "aten::eq.Scalar_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Scalar & other_base = other.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_eq.Scalar_out");
            torch::executor::aten::eq_outf(self_base, other_base, out_base);
        }
    ),

    // aten::eq.Tensor_out(self, other, out) — Tensor overload of eq_outf.
    Operator(
        "aten::eq.Tensor_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & other_base = other.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_eq.Tensor_out");
            torch::executor::aten::eq_outf(self_base, other_base, out_base);
        }
    ),

    // aten::gt.Scalar_out(self, other, out)
    Operator(
        "aten::gt.Scalar_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Scalar & other_base = other.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_gt.Scalar_out");
            torch::executor::aten::gt_outf(self_base, other_base, out_base);
        }
    ),

    // aten::index_select.out(self, dim, index, out)
    Operator(
        "aten::index_select.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& dim = *stack[1];
            EValue& index = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();
            int64_t dim_base = dim.to<int64_t>();
            const at::Tensor & index_base = index.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_index_select.out");
            torch::executor::aten::index_select_outf(self_base, dim_base, index_base, out_base);
        }
    ),

    // aten::nonzero.out(self, out)
    Operator(
        "aten::nonzero.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& out = *stack[1];
            const at::Tensor & self_base = self.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_nonzero.out");
            torch::executor::aten::nonzero_outf(self_base, out_base);
        }
    ),

    // aten::remainder.Scalar_out(self, other, out)
    Operator(
        "aten::remainder.Scalar_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Scalar & other_base = other.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_remainder.Scalar_out");
            torch::executor::aten::remainder_outf(self_base, other_base, out_base);
        }
    ),

    // aten::max.unary_out(self, out) — single-tensor (unary) max reduction.
    Operator(
        "aten::max.unary_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& out = *stack[1];
            const at::Tensor & self_base = self.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_max.unary_out");
            torch::executor::aten::max_outf(self_base, out_base);
        }
    ),
646 | |
    // aten::minimum.out(self, other, out)
    Operator(
        "aten::minimum.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& other = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Tensor & other_base = other.to<at::Tensor>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_minimum.out");
            torch::executor::aten::minimum_outf(self_base, other_base, out_base);
        }
    ),

    // aten::sort.values(self, dim, descending, values, indices) — two out
    // tensors: sorted values and their indices.
    Operator(
        "aten::sort.values",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& dim = *stack[1];
            EValue& descending = *stack[2];
            EValue& values = *stack[3];
            EValue& indices = *stack[4];
            const at::Tensor & self_base = self.to<at::Tensor>();
            int64_t dim_base = dim.to<int64_t>();
            bool descending_base = descending.to<bool>();
            at::Tensor & values_base = values.to<at::Tensor>();
            at::Tensor & indices_base = indices.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_sort.values");
            torch::executor::aten::sort_outf(self_base, dim_base, descending_base, values_base, indices_base);
        }
    ),

    // aten::topk.values(self, k, dim, largest, sorted, values, indices)
    Operator(
        "aten::topk.values",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& k = *stack[1];
            EValue& dim = *stack[2];
            EValue& largest = *stack[3];
            EValue& sorted = *stack[4];
            EValue& values = *stack[5];
            EValue& indices = *stack[6];
            const at::Tensor & self_base = self.to<at::Tensor>();
            int64_t k_base = k.to<int64_t>();
            int64_t dim_base = dim.to<int64_t>();
            bool largest_base = largest.to<bool>();
            bool sorted_base = sorted.to<bool>();
            at::Tensor & values_base = values.to<at::Tensor>();
            at::Tensor & indices_base = indices.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_topk.values");
            torch::executor::aten::topk_outf(self_base, k_base, dim_base, largest_base, sorted_base, values_base, indices_base);
        }
    ),

    // aten::leaky_relu.out(self, negative_slope, out)
    Operator(
        "aten::leaky_relu.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& negative_slope = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Scalar & negative_slope_base = negative_slope.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_leaky_relu.out");
            torch::executor::aten::leaky_relu_outf(self_base, negative_slope_base, out_base);
        }
    ),

    // aten::softplus.out(self, beta, threshold, out)
    Operator(
        "aten::softplus.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& beta = *stack[1];
            EValue& threshold = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();
            const at::Scalar & beta_base = beta.to<at::Scalar>();
            const at::Scalar & threshold_base = threshold.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_softplus.out");
            torch::executor::aten::softplus_outf(self_base, beta_base, threshold_base, out_base);
        }
    ),

    // aten::avg_pool2d.out(self, kernel_size, stride, padding, ceil_mode,
    // count_include_pad, divisor_override?, out)
    Operator(
        "aten::avg_pool2d.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& kernel_size = *stack[1];
            EValue& stride = *stack[2];
            EValue& padding = *stack[3];
            EValue& ceil_mode = *stack[4];
            EValue& count_include_pad = *stack[5];
            EValue& divisor_override = *stack[6];
            EValue& out = *stack[7];
            const at::Tensor & self_base = self.to<at::Tensor>();

            at::IntArrayRef kernel_size_list_out = kernel_size.toIntList();

            at::IntArrayRef stride_list_out = stride.toIntList();

            at::IntArrayRef padding_list_out = padding.toIntList();

            bool ceil_mode_base = ceil_mode.to<bool>();
            bool count_include_pad_base = count_include_pad.to<bool>();

            c10::optional<int64_t> divisor_override_opt_out = divisor_override.toOptional<int64_t>();

            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_avg_pool2d.out");
            torch::executor::aten::avg_pool2d_outf(self_base, kernel_size_list_out, stride_list_out, padding_list_out, ceil_mode_base, count_include_pad_base, divisor_override_opt_out, out_base);
        }
    ),

    // aten::max_pool2d_with_indices.out(self, kernel_size, stride, padding,
    // dilation, ceil_mode, out, indices)
    Operator(
        "aten::max_pool2d_with_indices.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& kernel_size = *stack[1];
            EValue& stride = *stack[2];
            EValue& padding = *stack[3];
            EValue& dilation = *stack[4];
            EValue& ceil_mode = *stack[5];
            EValue& out = *stack[6];
            EValue& indices = *stack[7];
            const at::Tensor & self_base = self.to<at::Tensor>();

            at::IntArrayRef kernel_size_list_out = kernel_size.toIntList();

            at::IntArrayRef stride_list_out = stride.toIntList();

            at::IntArrayRef padding_list_out = padding.toIntList();

            at::IntArrayRef dilation_list_out = dilation.toIntList();

            bool ceil_mode_base = ceil_mode.to<bool>();
            at::Tensor & out_base = out.to<at::Tensor>();
            at::Tensor & indices_base = indices.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_max_pool2d_with_indices.out");
            torch::executor::aten::max_pool2d_with_indices_outf(self_base, kernel_size_list_out, stride_list_out, padding_list_out, dilation_list_out, ceil_mode_base, out_base, indices_base);
        }
    ),

    // aten::upsample_nearest2d.out(self, output_size, scales_h?, scales_w?, out)
    Operator(
        "aten::upsample_nearest2d.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& output_size = *stack[1];
            EValue& scales_h = *stack[2];
            EValue& scales_w = *stack[3];
            EValue& out = *stack[4];
            const at::Tensor & self_base = self.to<at::Tensor>();

            at::IntArrayRef output_size_list_out = output_size.toIntList();

            c10::optional<double> scales_h_opt_out = scales_h.toOptional<double>();

            c10::optional<double> scales_w_opt_out = scales_w.toOptional<double>();

            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_upsample_nearest2d.out");
            torch::executor::aten::upsample_nearest2d_outf(self_base, output_size_list_out, scales_h_opt_out, scales_w_opt_out, out_base);
        }
    ),

    // aten::linalg_inv_ex.inverse(A, check_errors, inverse, info) — two out
    // tensors: the inverse and the per-batch info/status tensor.
    Operator(
        "aten::linalg_inv_ex.inverse",
        [](EValue** stack) {
            EValue& A = *stack[0];
            EValue& check_errors = *stack[1];
            EValue& inverse = *stack[2];
            EValue& info = *stack[3];
            const at::Tensor & A_base = A.to<at::Tensor>();
            bool check_errors_base = check_errors.to<bool>();
            at::Tensor & inverse_base = inverse.to<at::Tensor>();
            at::Tensor & info_base = info.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_linalg_inv_ex.inverse");
            torch::executor::aten::linalg_inv_ex_outf(A_base, check_errors_base, inverse_base, info_base);
        }
    ),
861 | |
    // aten::unbind_copy.int_out(self, dim, out) — `out` is a TensorList.
    Operator(
        "aten::unbind_copy.int_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& dim = *stack[1];
            EValue& out = *stack[2];
            const at::Tensor & self_base = self.to<at::Tensor>();
            int64_t dim_base = dim.to<int64_t>();

            at::TensorList out_list_out = out.toTensorList();

            EXECUTORCH_SCOPE_PROF("native_call_unbind_copy.int_out");
            torch::executor::aten::unbind_copy_outf(self_base, dim_base, out_list_out);

            // NOTE(review): unlike the single-tensor ops above, the
            // tensor-list ops write the out EValue back to the slot after the
            // last argument — presumably the return-value slot expected by
            // the interpreter; confirm against the codegen template.
            stack[3] = &out;
        }
    ),

    // aten::split_copy.Tensor_out(self, split_size, dim, out) — out is a
    // TensorList; see the write-back note on unbind_copy above.
    Operator(
        "aten::split_copy.Tensor_out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& split_size = *stack[1];
            EValue& dim = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();
            int64_t split_size_base = split_size.to<int64_t>();
            int64_t dim_base = dim.to<int64_t>();

            at::TensorList out_list_out = out.toTensorList();

            EXECUTORCH_SCOPE_PROF("native_call_split_copy.Tensor_out");
            torch::executor::aten::split_copy_outf(self_base, split_size_base, dim_base, out_list_out);

            stack[4] = &out;
        }
    ),

    // aten::split_with_sizes_copy.out(self, split_sizes, dim, out) — out is a
    // TensorList; result EValue written back to the slot after the arguments.
    Operator(
        "aten::split_with_sizes_copy.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& split_sizes = *stack[1];
            EValue& dim = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();

            at::IntArrayRef split_sizes_list_out = split_sizes.toIntList();

            int64_t dim_base = dim.to<int64_t>();

            at::TensorList out_list_out = out.toTensorList();

            EXECUTORCH_SCOPE_PROF("native_call_split_with_sizes_copy.out");
            torch::executor::aten::split_with_sizes_copy_outf(self_base, split_sizes_list_out, dim_base, out_list_out);

            stack[4] = &out;
        }
    ),

    // aten::constant_pad_nd.out(self, pad, value, out)
    Operator(
        "aten::constant_pad_nd.out",
        [](EValue** stack) {
            EValue& self = *stack[0];
            EValue& pad = *stack[1];
            EValue& value = *stack[2];
            EValue& out = *stack[3];
            const at::Tensor & self_base = self.to<at::Tensor>();

            at::IntArrayRef pad_list_out = pad.toIntList();

            const at::Scalar & value_base = value.to<at::Scalar>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_constant_pad_nd.out");
            torch::executor::aten::constant_pad_nd_outf(self_base, pad_list_out, value_base, out_base);
        }
    ),

    // aten::convolution.out(input, weight, bias?, stride, padding, dilation,
    // transposed, output_padding, groups, out)
    Operator(
        "aten::convolution.out",
        [](EValue** stack) {
            EValue& input = *stack[0];
            EValue& weight = *stack[1];
            EValue& bias = *stack[2];
            EValue& stride = *stack[3];
            EValue& padding = *stack[4];
            EValue& dilation = *stack[5];
            EValue& transposed = *stack[6];
            EValue& output_padding = *stack[7];
            EValue& groups = *stack[8];
            EValue& out = *stack[9];
            const at::Tensor & input_base = input.to<at::Tensor>();
            const at::Tensor & weight_base = weight.to<at::Tensor>();

            c10::optional<at::Tensor> bias_opt_out = bias.toOptional<at::Tensor>();

            at::IntArrayRef stride_list_out = stride.toIntList();

            at::IntArrayRef padding_list_out = padding.toIntList();

            at::IntArrayRef dilation_list_out = dilation.toIntList();

            bool transposed_base = transposed.to<bool>();

            at::IntArrayRef output_padding_list_out = output_padding.toIntList();

            int64_t groups_base = groups.to<int64_t>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_convolution.out");
            torch::executor::aten::convolution_outf(input_base, weight_base, bias_opt_out, stride_list_out, padding_list_out, dilation_list_out, transposed_base, output_padding_list_out, groups_base, out_base);
        }
    ),

    // aten::embedding.out(weight, indices, padding_idx, scale_grad_by_freq,
    // sparse, out)
    Operator(
        "aten::embedding.out",
        [](EValue** stack) {
            EValue& weight = *stack[0];
            EValue& indices = *stack[1];
            EValue& padding_idx = *stack[2];
            EValue& scale_grad_by_freq = *stack[3];
            EValue& sparse = *stack[4];
            EValue& out = *stack[5];
            const at::Tensor & weight_base = weight.to<at::Tensor>();
            const at::Tensor & indices_base = indices.to<at::Tensor>();
            int64_t padding_idx_base = padding_idx.to<int64_t>();
            bool scale_grad_by_freq_base = scale_grad_by_freq.to<bool>();
            bool sparse_base = sparse.to<bool>();
            at::Tensor & out_base = out.to<at::Tensor>();

            EXECUTORCH_SCOPE_PROF("native_call_embedding.out");
            torch::executor::aten::embedding_outf(weight_base, indices_base, padding_idx_base, scale_grad_by_freq_base, sparse_base, out_base);
        }
    ),
1009 | |
1010 | Operator( |
1011 | "aten::grid_sampler_2d.out" , |
1012 | [](EValue** stack) { |
1013 | EValue& input = *stack[0]; |
1014 | EValue& grid = *stack[1]; |
1015 | EValue& interpolation_mode = *stack[2]; |
1016 | EValue& padding_mode = *stack[3]; |
1017 | EValue& align_corners = *stack[4]; |
1018 | EValue& out = *stack[5]; |
1019 | const at::Tensor & input_base = input.to<at::Tensor>(); |
1020 | const at::Tensor & grid_base = grid.to<at::Tensor>(); |
1021 | int64_t interpolation_mode_base = interpolation_mode.to<int64_t>(); |
1022 | int64_t padding_mode_base = padding_mode.to<int64_t>(); |
1023 | bool align_corners_base = align_corners.to<bool>(); |
1024 | at::Tensor & out_base = out.to<at::Tensor>(); |
1025 | |
1026 | EXECUTORCH_SCOPE_PROF("native_call_grid_sampler_2d.out" ); |
1027 | torch::executor::aten::grid_sampler_2d_outf(input_base, grid_base, interpolation_mode_base, padding_mode_base, align_corners_base, out_base); |
1028 | |
1029 | |
1030 | } |
1031 | ), |
1032 | |
Operator(
    "aten::index_put.out" ,
    [](EValue** stack) {
        // Unboxing stub: unpacks (self, indices, values, accumulate, out)
        // from the EValue stack and dispatches to the out-variant kernel.
        EValue& self = *stack[0];
        EValue& indices = *stack[1];
        EValue& values = *stack[2];
        EValue& accumulate = *stack[3];
        EValue& out = *stack[4];
        const at::Tensor & self_base = self.to<at::Tensor>();

#ifdef USE_ATEN_LIB
        // ATen build: the kernel takes c10::List<optional<Tensor>>, so the
        // flat ArrayRef of optional tensors is copied element-wise into one.
        at::ArrayRef<c10::optional<at::Tensor>> indices_list_in = indices.toListOptionalTensor();
        c10::List<c10::optional<at::Tensor>> indices_list_out;
        for (auto indices_elem: indices_list_in) {
            indices_list_out.push_back(indices_elem);
        }
#else
        // Portable build: the kernel accepts the ArrayRef view directly,
        // so no copy is needed.
        torch::executor::ArrayRef<torch::executor::optional<torch::executor::Tensor>> indices_list_out = indices.toListOptionalTensor();
#endif

        const at::Tensor & values_base = values.to<at::Tensor>();
        bool accumulate_base = accumulate.to<bool>();
        at::Tensor & out_base = out.to<at::Tensor>();

        EXECUTORCH_SCOPE_PROF("native_call_index_put.out" );
        torch::executor::aten::index_put_outf(self_base, indices_list_out, values_base, accumulate_base, out_base);
    }
),
1063 | |
1064 | Operator( |
1065 | "aten::native_layer_norm.out" , |
1066 | [](EValue** stack) { |
1067 | EValue& input = *stack[0]; |
1068 | EValue& normalized_shape = *stack[1]; |
1069 | EValue& weight = *stack[2]; |
1070 | EValue& bias = *stack[3]; |
1071 | EValue& eps = *stack[4]; |
1072 | EValue& out0 = *stack[5]; |
1073 | EValue& out1 = *stack[6]; |
1074 | EValue& out2 = *stack[7]; |
1075 | const at::Tensor & input_base = input.to<at::Tensor>(); |
1076 | |
1077 | at::IntArrayRef normalized_shape_list_out = normalized_shape.toIntList(); |
1078 | |
1079 | |
1080 | c10::optional<at::Tensor> weight_opt_out = weight.toOptional<at::Tensor>(); |
1081 | |
1082 | |
1083 | c10::optional<at::Tensor> bias_opt_out = bias.toOptional<at::Tensor>(); |
1084 | |
1085 | double eps_base = eps.to<double>(); |
1086 | at::Tensor & out0_base = out0.to<at::Tensor>(); |
1087 | at::Tensor & out1_base = out1.to<at::Tensor>(); |
1088 | at::Tensor & out2_base = out2.to<at::Tensor>(); |
1089 | |
1090 | EXECUTORCH_SCOPE_PROF("native_call_native_layer_norm.out" ); |
1091 | torch::executor::aten::native_layer_norm_outf(input_base, normalized_shape_list_out, weight_opt_out, bias_opt_out, eps_base, out0_base, out1_base, out2_base); |
1092 | |
1093 | |
1094 | } |
1095 | ), |
1096 | |
1097 | Operator( |
1098 | "aten::pixel_shuffle.out" , |
1099 | [](EValue** stack) { |
1100 | EValue& self = *stack[0]; |
1101 | EValue& upscale_factor = *stack[1]; |
1102 | EValue& out = *stack[2]; |
1103 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1104 | int64_t upscale_factor_base = upscale_factor.to<int64_t>(); |
1105 | at::Tensor & out_base = out.to<at::Tensor>(); |
1106 | |
1107 | EXECUTORCH_SCOPE_PROF("native_call_pixel_shuffle.out" ); |
1108 | torch::executor::aten::pixel_shuffle_outf(self_base, upscale_factor_base, out_base); |
1109 | |
1110 | |
1111 | } |
1112 | ), |
1113 | |
1114 | Operator( |
1115 | "aten::repeat.out" , |
1116 | [](EValue** stack) { |
1117 | EValue& self = *stack[0]; |
1118 | EValue& repeats = *stack[1]; |
1119 | EValue& out = *stack[2]; |
1120 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1121 | |
1122 | at::IntArrayRef repeats_list_out = repeats.toIntList(); |
1123 | |
1124 | at::Tensor & out_base = out.to<at::Tensor>(); |
1125 | |
1126 | EXECUTORCH_SCOPE_PROF("native_call_repeat.out" ); |
1127 | torch::executor::aten::repeat_outf(self_base, repeats_list_out, out_base); |
1128 | |
1129 | |
1130 | } |
1131 | ), |
1132 | |
1133 | Operator( |
1134 | "aten::relu.out" , |
1135 | [](EValue** stack) { |
1136 | EValue& self = *stack[0]; |
1137 | EValue& out = *stack[1]; |
1138 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1139 | at::Tensor & out_base = out.to<at::Tensor>(); |
1140 | |
1141 | EXECUTORCH_SCOPE_PROF("native_call_relu.out" ); |
1142 | torch::executor::aten::relu_outf(self_base, out_base); |
1143 | |
1144 | |
1145 | } |
1146 | ), |
1147 | |
1148 | Operator( |
1149 | "aten::unsafe_split.Tensor_out" , |
1150 | [](EValue** stack) { |
1151 | EValue& self = *stack[0]; |
1152 | EValue& split_size = *stack[1]; |
1153 | EValue& dim = *stack[2]; |
1154 | EValue& out = *stack[3]; |
1155 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1156 | int64_t split_size_base = split_size.to<int64_t>(); |
1157 | int64_t dim_base = dim.to<int64_t>(); |
1158 | |
1159 | at::TensorList out_list_out = out.toTensorList(); |
1160 | |
1161 | |
1162 | EXECUTORCH_SCOPE_PROF("native_call_unsafe_split.Tensor_out" ); |
1163 | torch::executor::aten::unsafe_split_outf(self_base, split_size_base, dim_base, out_list_out); |
1164 | |
1165 | stack[4] = &out; |
1166 | } |
1167 | ), |
1168 | |
1169 | Operator( |
1170 | "aten::_unique2.out" , |
1171 | [](EValue** stack) { |
1172 | EValue& self = *stack[0]; |
1173 | EValue& sorted = *stack[1]; |
1174 | EValue& return_inverse = *stack[2]; |
1175 | EValue& return_counts = *stack[3]; |
1176 | EValue& out0 = *stack[4]; |
1177 | EValue& out1 = *stack[5]; |
1178 | EValue& out2 = *stack[6]; |
1179 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1180 | bool sorted_base = sorted.to<bool>(); |
1181 | bool return_inverse_base = return_inverse.to<bool>(); |
1182 | bool return_counts_base = return_counts.to<bool>(); |
1183 | at::Tensor & out0_base = out0.to<at::Tensor>(); |
1184 | at::Tensor & out1_base = out1.to<at::Tensor>(); |
1185 | at::Tensor & out2_base = out2.to<at::Tensor>(); |
1186 | |
1187 | EXECUTORCH_SCOPE_PROF("native_call__unique2.out" ); |
1188 | torch::executor::aten::_unique2_outf(self_base, sorted_base, return_inverse_base, return_counts_base, out0_base, out1_base, out2_base); |
1189 | |
1190 | |
1191 | } |
1192 | ), |
1193 | |
1194 | Operator( |
1195 | "aten::zeros_like.out" , |
1196 | [](EValue** stack) { |
1197 | EValue& self = *stack[0]; |
1198 | EValue& memory_format = *stack[1]; |
1199 | EValue& out = *stack[2]; |
1200 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1201 | |
1202 | c10::optional<at::MemoryFormat> memory_format_opt_out = memory_format.toOptional<at::MemoryFormat>(); |
1203 | |
1204 | at::Tensor & out_base = out.to<at::Tensor>(); |
1205 | |
1206 | EXECUTORCH_SCOPE_PROF("native_call_zeros_like.out" ); |
1207 | torch::executor::aten::zeros_like_outf(self_base, memory_format_opt_out, out_base); |
1208 | |
1209 | |
1210 | } |
1211 | ), |
1212 | |
1213 | Operator( |
1214 | "aten::clone.out" , |
1215 | [](EValue** stack) { |
1216 | EValue& self = *stack[0]; |
1217 | EValue& memory_format = *stack[1]; |
1218 | EValue& out = *stack[2]; |
1219 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1220 | |
1221 | c10::optional<at::MemoryFormat> memory_format_opt_out = memory_format.toOptional<at::MemoryFormat>(); |
1222 | |
1223 | at::Tensor & out_base = out.to<at::Tensor>(); |
1224 | |
1225 | EXECUTORCH_SCOPE_PROF("native_call_clone.out" ); |
1226 | torch::executor::aten::clone_outf(self_base, memory_format_opt_out, out_base); |
1227 | |
1228 | |
1229 | } |
1230 | ), |
1231 | |
1232 | Operator( |
1233 | "aten::rsub.Scalar_out" , |
1234 | [](EValue** stack) { |
1235 | EValue& self = *stack[0]; |
1236 | EValue& other = *stack[1]; |
1237 | EValue& alpha = *stack[2]; |
1238 | EValue& out = *stack[3]; |
1239 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1240 | const at::Scalar & other_base = other.to<at::Scalar>(); |
1241 | const at::Scalar & alpha_base = alpha.to<at::Scalar>(); |
1242 | at::Tensor & out_base = out.to<at::Tensor>(); |
1243 | |
1244 | EXECUTORCH_SCOPE_PROF("native_call_rsub.Scalar_out" ); |
1245 | torch::executor::aten::rsub_outf(self_base, other_base, alpha_base, out_base); |
1246 | |
1247 | |
1248 | } |
1249 | ), |
1250 | |
1251 | Operator( |
1252 | "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out" , |
1253 | [](EValue** stack) { |
1254 | EValue& self = *stack[0]; |
1255 | EValue& scale = *stack[1]; |
1256 | EValue& zero_point = *stack[2]; |
1257 | EValue& fake_quant_enabled = *stack[3]; |
1258 | EValue& quant_min = *stack[4]; |
1259 | EValue& quant_max = *stack[5]; |
1260 | EValue& out0 = *stack[6]; |
1261 | EValue& out1 = *stack[7]; |
1262 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1263 | const at::Tensor & scale_base = scale.to<at::Tensor>(); |
1264 | const at::Tensor & zero_point_base = zero_point.to<at::Tensor>(); |
1265 | const at::Tensor & fake_quant_enabled_base = fake_quant_enabled.to<at::Tensor>(); |
1266 | int64_t quant_min_base = quant_min.to<int64_t>(); |
1267 | int64_t quant_max_base = quant_max.to<int64_t>(); |
1268 | at::Tensor & out0_base = out0.to<at::Tensor>(); |
1269 | at::Tensor & out1_base = out1.to<at::Tensor>(); |
1270 | |
1271 | EXECUTORCH_SCOPE_PROF("native_call__fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out" ); |
1272 | torch::executor::aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(self_base, scale_base, zero_point_base, fake_quant_enabled_base, quant_min_base, quant_max_base, out0_base, out1_base); |
1273 | |
1274 | |
1275 | } |
1276 | ), |
1277 | |
1278 | Operator( |
1279 | "aten::_to_copy.out" , |
1280 | [](EValue** stack) { |
1281 | EValue& self = *stack[0]; |
1282 | EValue& non_blocking = *stack[1]; |
1283 | EValue& memory_format = *stack[2]; |
1284 | EValue& out = *stack[3]; |
1285 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1286 | bool non_blocking_base = non_blocking.to<bool>(); |
1287 | |
1288 | c10::optional<at::MemoryFormat> memory_format_opt_out = memory_format.toOptional<at::MemoryFormat>(); |
1289 | |
1290 | at::Tensor & out_base = out.to<at::Tensor>(); |
1291 | |
1292 | EXECUTORCH_SCOPE_PROF("native_call__to_copy.out" ); |
1293 | torch::executor::aten::_to_copy_outf(self_base, non_blocking_base, memory_format_opt_out, out_base); |
1294 | |
1295 | |
1296 | } |
1297 | ), |
1298 | |
1299 | Operator( |
1300 | "aten::masked_fill.Scalar_out" , |
1301 | [](EValue** stack) { |
1302 | EValue& self = *stack[0]; |
1303 | EValue& mask = *stack[1]; |
1304 | EValue& value = *stack[2]; |
1305 | EValue& out = *stack[3]; |
1306 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1307 | const at::Tensor & mask_base = mask.to<at::Tensor>(); |
1308 | const at::Scalar & value_base = value.to<at::Scalar>(); |
1309 | at::Tensor & out_base = out.to<at::Tensor>(); |
1310 | |
1311 | EXECUTORCH_SCOPE_PROF("native_call_masked_fill.Scalar_out" ); |
1312 | torch::executor::aten::masked_fill_outf(self_base, mask_base, value_base, out_base); |
1313 | |
1314 | |
1315 | } |
1316 | ), |
1317 | |
1318 | Operator( |
1319 | "aten::expand_copy.out" , |
1320 | [](EValue** stack) { |
1321 | EValue& self = *stack[0]; |
1322 | EValue& size = *stack[1]; |
1323 | EValue& implicit = *stack[2]; |
1324 | EValue& out = *stack[3]; |
1325 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1326 | |
1327 | at::IntArrayRef size_list_out = size.toIntList(); |
1328 | |
1329 | bool implicit_base = implicit.to<bool>(); |
1330 | at::Tensor & out_base = out.to<at::Tensor>(); |
1331 | |
1332 | EXECUTORCH_SCOPE_PROF("native_call_expand_copy.out" ); |
1333 | torch::executor::aten::expand_copy_outf(self_base, size_list_out, implicit_base, out_base); |
1334 | |
1335 | |
1336 | } |
1337 | ), |
1338 | |
1339 | Operator( |
1340 | "aten::permute_copy.out" , |
1341 | [](EValue** stack) { |
1342 | EValue& self = *stack[0]; |
1343 | EValue& dims = *stack[1]; |
1344 | EValue& out = *stack[2]; |
1345 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1346 | |
1347 | at::IntArrayRef dims_list_out = dims.toIntList(); |
1348 | |
1349 | at::Tensor & out_base = out.to<at::Tensor>(); |
1350 | |
1351 | EXECUTORCH_SCOPE_PROF("native_call_permute_copy.out" ); |
1352 | torch::executor::aten::permute_copy_outf(self_base, dims_list_out, out_base); |
1353 | |
1354 | |
1355 | } |
1356 | ), |
1357 | |
1358 | Operator( |
1359 | "aten::_reshape_alias_copy.out" , |
1360 | [](EValue** stack) { |
1361 | EValue& self = *stack[0]; |
1362 | EValue& size = *stack[1]; |
1363 | EValue& stride = *stack[2]; |
1364 | EValue& out = *stack[3]; |
1365 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1366 | |
1367 | at::IntArrayRef size_list_out = size.toIntList(); |
1368 | |
1369 | |
1370 | at::IntArrayRef stride_list_out = stride.toIntList(); |
1371 | |
1372 | at::Tensor & out_base = out.to<at::Tensor>(); |
1373 | |
1374 | EXECUTORCH_SCOPE_PROF("native_call__reshape_alias_copy.out" ); |
1375 | torch::executor::aten::_reshape_alias_copy_outf(self_base, size_list_out, stride_list_out, out_base); |
1376 | |
1377 | |
1378 | } |
1379 | ), |
1380 | |
1381 | Operator( |
1382 | "aten::select_copy.int_out" , |
1383 | [](EValue** stack) { |
1384 | EValue& self = *stack[0]; |
1385 | EValue& dim = *stack[1]; |
1386 | EValue& index = *stack[2]; |
1387 | EValue& out = *stack[3]; |
1388 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1389 | int64_t dim_base = dim.to<int64_t>(); |
1390 | int64_t index_base = index.to<int64_t>(); |
1391 | at::Tensor & out_base = out.to<at::Tensor>(); |
1392 | |
1393 | EXECUTORCH_SCOPE_PROF("native_call_select_copy.int_out" ); |
1394 | torch::executor::aten::select_copy_outf(self_base, dim_base, index_base, out_base); |
1395 | |
1396 | |
1397 | } |
1398 | ), |
1399 | |
1400 | Operator( |
1401 | "aten::detach_copy.out" , |
1402 | [](EValue** stack) { |
1403 | EValue& self = *stack[0]; |
1404 | EValue& out = *stack[1]; |
1405 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1406 | at::Tensor & out_base = out.to<at::Tensor>(); |
1407 | |
1408 | EXECUTORCH_SCOPE_PROF("native_call_detach_copy.out" ); |
1409 | torch::executor::aten::detach_copy_outf(self_base, out_base); |
1410 | |
1411 | |
1412 | } |
1413 | ), |
1414 | |
1415 | Operator( |
1416 | "aten::slice_copy.Tensor_out" , |
1417 | [](EValue** stack) { |
1418 | EValue& self = *stack[0]; |
1419 | EValue& dim = *stack[1]; |
1420 | EValue& start = *stack[2]; |
1421 | EValue& end = *stack[3]; |
1422 | EValue& step = *stack[4]; |
1423 | EValue& out = *stack[5]; |
1424 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1425 | int64_t dim_base = dim.to<int64_t>(); |
1426 | |
1427 | c10::optional<int64_t> start_opt_out = start.toOptional<int64_t>(); |
1428 | |
1429 | |
1430 | c10::optional<int64_t> end_opt_out = end.toOptional<int64_t>(); |
1431 | |
1432 | int64_t step_base = step.to<int64_t>(); |
1433 | at::Tensor & out_base = out.to<at::Tensor>(); |
1434 | |
1435 | EXECUTORCH_SCOPE_PROF("native_call_slice_copy.Tensor_out" ); |
1436 | torch::executor::aten::slice_copy_outf(self_base, dim_base, start_opt_out, end_opt_out, step_base, out_base); |
1437 | |
1438 | |
1439 | } |
1440 | ), |
1441 | |
1442 | Operator( |
1443 | "aten::transpose_copy.int_out" , |
1444 | [](EValue** stack) { |
1445 | EValue& self = *stack[0]; |
1446 | EValue& dim0 = *stack[1]; |
1447 | EValue& dim1 = *stack[2]; |
1448 | EValue& out = *stack[3]; |
1449 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1450 | int64_t dim0_base = dim0.to<int64_t>(); |
1451 | int64_t dim1_base = dim1.to<int64_t>(); |
1452 | at::Tensor & out_base = out.to<at::Tensor>(); |
1453 | |
1454 | EXECUTORCH_SCOPE_PROF("native_call_transpose_copy.int_out" ); |
1455 | torch::executor::aten::transpose_copy_outf(self_base, dim0_base, dim1_base, out_base); |
1456 | |
1457 | |
1458 | } |
1459 | ), |
1460 | |
1461 | Operator( |
1462 | "aten::unsqueeze_copy.out" , |
1463 | [](EValue** stack) { |
1464 | EValue& self = *stack[0]; |
1465 | EValue& dim = *stack[1]; |
1466 | EValue& out = *stack[2]; |
1467 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1468 | int64_t dim_base = dim.to<int64_t>(); |
1469 | at::Tensor & out_base = out.to<at::Tensor>(); |
1470 | |
1471 | EXECUTORCH_SCOPE_PROF("native_call_unsqueeze_copy.out" ); |
1472 | torch::executor::aten::unsqueeze_copy_outf(self_base, dim_base, out_base); |
1473 | |
1474 | |
1475 | } |
1476 | ), |
1477 | |
1478 | Operator( |
1479 | "aten::view_copy.out" , |
1480 | [](EValue** stack) { |
1481 | EValue& self = *stack[0]; |
1482 | EValue& size = *stack[1]; |
1483 | EValue& out = *stack[2]; |
1484 | const at::Tensor & self_base = self.to<at::Tensor>(); |
1485 | |
1486 | at::IntArrayRef size_list_out = size.toIntList(); |
1487 | |
1488 | at::Tensor & out_base = out.to<at::Tensor>(); |
1489 | |
1490 | EXECUTORCH_SCOPE_PROF("native_call_view_copy.out" ); |
1491 | torch::executor::aten::view_copy_outf(self_base, size_list_out, out_base); |
1492 | |
1493 | |
1494 | } |
1495 | ), |
1496 | |
1497 | Operator( |
1498 | "custom::add_3.out" , |
1499 | [](EValue** stack) { |
1500 | EValue& a = *stack[0]; |
1501 | EValue& b = *stack[1]; |
1502 | EValue& c = *stack[2]; |
1503 | EValue& out = *stack[3]; |
1504 | const at::Tensor & a_base = a.to<at::Tensor>(); |
1505 | const at::Tensor & b_base = b.to<at::Tensor>(); |
1506 | const at::Tensor & c_base = c.to<at::Tensor>(); |
1507 | at::Tensor & out_base = out.to<at::Tensor>(); |
1508 | |
1509 | EXECUTORCH_SCOPE_PROF("native_call_add_3.out" ); |
1510 | torch::executor::custom::add_3_outf(a_base, b_base, c_base, out_base); |
1511 | |
1512 | |
1513 | } |
1514 | ), // Generated operators |
1515 | }; |
1516 | |
// Explicitly convert to ArrayRef, so that the API can take an empty C array of
// Operators.
// Uses the (begin, end) pointer-pair constructor; the element count is the
// total byte size of the array divided by sizeof(Operator).
static OpArrayRef op_array_ref(
    operators_to_register,
    operators_to_register + sizeof(operators_to_register) / sizeof(Operator));
1522 | |
// Return value not used. Keep the static variable assignment to register
// operators in static initialization time: registration happens as a side
// effect of initializing this file-local static when the program loads.
static auto success_with_op_reg = register_operators(op_array_ref);
1526 | } // namespace |
1527 | } // namespace executor |
1528 | } // namespace torch |
1529 | |