1// @generated by torchgen/gen.py from DispatchKeyNativeFunctions.cpp
2#include <torch/csrc/lazy/core/tensor.h>
3#include <torch/csrc/lazy/core/shape_inference.h>
4#include <ATen/Functions.h>
5#include <ATen/native/TensorConversions.h>
6#include <ATen/NativeFunctions.h>
7#include <ATen/CompositeExplicitAutogradNonFunctionalFunctions.h>
8#include <ATen/MetaFunctions.h>
9#include <ATen/Operators.h>
10#include <ATen/native/CPUFallback.h>
11#include <torch/csrc/lazy/core/ir_builder.h>
12#include <torch/csrc/lazy/core/lazy_graph_executor.h>
13#include <torch/csrc/lazy/core/metrics.h>
14#include <torch/csrc/lazy/core/shape.h>
15#include <torch/csrc/lazy/generated/LazyNativeFunctions.h>
16#include <torch/csrc/lazy/generated/LazyIr.h>
17#include <torch/csrc/lazy/ts_backend/ts_eager_fallback.h>
18
19
20namespace {
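// Helpers that mirror inputs onto the meta device so output shape/dtype can be
// computed by running ATen meta kernels without touching real storage.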
21at::Tensor to_meta(const at::Tensor& tensor) {
22 // undefined tensors can't be converted to the meta device, since they don't have sizes/strides
23 if (!tensor.defined()) return tensor;
24 auto out = at::native::empty_strided_meta_symint(tensor.sym_sizes(), tensor.sym_strides(), /*dtype=*/c10::make_optional(tensor.scalar_type()), /*layout=*/c10::make_optional(tensor.layout()), /*device=*/c10::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/c10::nullopt);
  // Preserve the wrapped-number flag so that dtype promotion works properly.
26 if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
27 out.unsafeGetTensorImpl()->set_wrapped_number(true);
28 }
29 return out;
30}
31c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor>& tensor) {
32 if (tensor.has_value()) {
33 return to_meta(*tensor);
34 }
35 return c10::nullopt;
36}
37
38std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
39 std::vector<at::Tensor> outs;
40 outs.reserve(t_list.size());
41 for (const auto& tensor : t_list) {
42 outs.push_back(to_meta(tensor));
43 }
44 return outs;
45}
46} // namespace
47
48namespace torch {
49namespace lazy {
50
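    // Each generated kernel below follows the same pattern:
    //   1. optionally route to the eager fallback (force_eager_fallback),
    //   2. bump the lazy op counter and infer a common backend device,
    //   3. wrap tensor/scalar arguments as lazy IR values,
    //   4. reuse a cached IR node if one matches (ReuseNode); otherwise compute the
    //      output shape (hand-written compute_shape_* helper or an ATen meta kernel),
    //      build the node (MakeNode) and cache it (CacheNode),
    //   5. wrap the resulting node back into an at::Tensor (or update the lazy
    //      tensor in place for out= variants).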
51 at::Tensor LazyNativeFunctions::_adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
52
53 if (force_eager_fallback(at::aten::_adaptive_avg_pool2d)) {
54 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_adaptive_avg_pool2d)>::call(
55 self,
56 c10::fromIntArrayRefSlow(output_size)
57 );
58 }
59
60 TORCH_LAZY_FN_COUNTER("lazy::");
61 auto common_device = torch::lazy::GetBackendDevice(self);
62 TORCH_INTERNAL_ASSERT(common_device);
63
64 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
65 torch::lazy::NodePtr node = torch::lazy::ReuseNode<AdaptiveAvgPool2d>(lazy_self->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()));
66 if (!node) {
67
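            // Shape inference for this op uses the hand-written compute_shape_*
            // helper declared in torch/csrc/lazy/core/shape_inference.h.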
68 auto shapes = torch::lazy::compute_shape__adaptive_avg_pool2d(self, output_size);
69 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
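            // With symbolic shape tracing enabled, the computed shapes are
            // post-processed against the op's schema via applySymbolicShapesOnLT.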
70 if(torch::lazy::symbolicShapeEnabled()){
71 std::vector<torch::jit::IValue> inputs = { self, output_size };
72 const char* schema_str = "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor";
73 applySymbolicShapesOnLT(schema_str, inputs, shapes);
74 }
75
76 node = torch::lazy::MakeNode<AdaptiveAvgPool2d>(lazy_self->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), std::move(shapes));
77 CacheNode(node);
78 }
79
80 auto result = torch::lazy::CreateAtenFromLtcTensor(
81 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
82 return result;
83 }
84
85
86 at::Tensor LazyNativeFunctions::_adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
87
88 if (force_eager_fallback(at::aten::_adaptive_avg_pool2d_backward)) {
89 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_adaptive_avg_pool2d_backward)>::call(
90 grad_output,
91 self
92 );
93 }
94
95 TORCH_LAZY_FN_COUNTER("lazy::");
96 auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
97 TORCH_INTERNAL_ASSERT(common_device);
98
99 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
100 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
101 torch::lazy::NodePtr node = torch::lazy::ReuseNode<AdaptiveAvgPool2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue());
102 if (!node) {
103
104 auto shapes = torch::lazy::compute_shape__adaptive_avg_pool2d_backward(grad_output, self);
105 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
106 if(torch::lazy::symbolicShapeEnabled()){
107 std::vector<torch::jit::IValue> inputs = { grad_output, self };
108 const char* schema_str = "aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor";
109 applySymbolicShapesOnLT(schema_str, inputs, shapes);
110 }
111
112 node = torch::lazy::MakeNode<AdaptiveAvgPool2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), std::move(shapes));
113 CacheNode(node);
114 }
115
116 auto result = torch::lazy::CreateAtenFromLtcTensor(
117 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
118 return result;
119 }
120
121
122 at::Tensor LazyNativeFunctions::_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
123
124 if (force_eager_fallback(at::aten::_log_softmax)) {
125 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_log_softmax)>::call(
126 self,
127 dim,
128 half_to_float
129 );
130 }
131
132 TORCH_LAZY_FN_COUNTER("lazy::");
133 auto common_device = torch::lazy::GetBackendDevice(self);
134 TORCH_INTERNAL_ASSERT(common_device);
135
136 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
137 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LogSoftmax>(lazy_self->GetIrValue(), dim, half_to_float);
138 if (!node) {
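            // For ops with a meta kernel, shape/dtype inference runs the kernel
            // on meta-device copies of the inputs.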
139 auto self_meta = to_meta(self);
140 auto out_meta = at::meta::_log_softmax(self_meta, dim, half_to_float);
141
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
143 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
144 if(torch::lazy::symbolicShapeEnabled()){
145 std::vector<torch::jit::IValue> inputs = { self, dim, half_to_float };
146 const char* schema_str = "aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor";
147 applySymbolicShapesOnLT(schema_str, inputs, shapes);
148 }
149
150 node = torch::lazy::MakeNode<LogSoftmax>(lazy_self->GetIrValue(), dim, half_to_float, std::move(shapes));
151 CacheNode(node);
152 }
153
154 auto result = torch::lazy::CreateAtenFromLtcTensor(
155 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
156 return result;
157 }
158
159
160 at::Tensor LazyNativeFunctions::_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
161
162 if (force_eager_fallback(at::aten::_log_softmax_backward_data)) {
163 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_log_softmax_backward_data)>::call(
164 grad_output,
165 output,
166 dim,
167 input_dtype
168 );
169 }
170
171 TORCH_LAZY_FN_COUNTER("lazy::");
172 auto common_device = torch::lazy::GetBackendDevice(grad_output, output);
173 TORCH_INTERNAL_ASSERT(common_device);
174
175 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
176 LazyTensorPtr lazy_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(output, *common_device);
177 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LogSoftmaxBackwardData>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), dim, input_dtype);
178 if (!node) {
179 auto grad_output_meta = to_meta(grad_output);
180 auto output_meta = to_meta(output);
181 auto out_meta = at::meta::_log_softmax_backward_data(grad_output_meta, output_meta, dim, input_dtype);
182
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
184 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
185 if(torch::lazy::symbolicShapeEnabled()){
186 std::vector<torch::jit::IValue> inputs = { grad_output, output, dim, input_dtype };
187 const char* schema_str = "aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor";
188 applySymbolicShapesOnLT(schema_str, inputs, shapes);
189 }
190
191 node = torch::lazy::MakeNode<LogSoftmaxBackwardData>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), dim, input_dtype, std::move(shapes));
192 CacheNode(node);
193 }
194
195 auto result = torch::lazy::CreateAtenFromLtcTensor(
196 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
197 return result;
198 }
199
200
201 at::Tensor LazyNativeFunctions::_reshape_alias_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
202
203 if (force_eager_fallback(at::aten::_reshape_alias_copy)) {
204 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_reshape_alias_copy)>::call(
205 self,
206 size,
207 stride
208 );
209 }
210
211 TORCH_LAZY_FN_COUNTER("lazy::");
212 auto common_device = torch::lazy::GetBackendDevice(self);
213 TORCH_INTERNAL_ASSERT(common_device);
214
215 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
216 torch::lazy::NodePtr node = torch::lazy::ReuseNode<ReshapeAliasCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride));
217 if (!node) {
218 auto self_meta = to_meta(self);
219 auto out_meta = at::compositeexplicitautogradnonfunctional::_reshape_alias_copy_symint(self_meta, size, stride);
220
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
222 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
223 if(torch::lazy::symbolicShapeEnabled()){
224 std::vector<torch::jit::IValue> inputs = { self, size, stride };
225 const char* schema_str = "aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor";
226 applySymbolicShapesOnLT(schema_str, inputs, shapes);
227 }
228
229 node = torch::lazy::MakeNode<ReshapeAliasCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), std::move(shapes));
230 CacheNode(node);
231 }
232
233 auto result = torch::lazy::CreateAtenFromLtcTensor(
234 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
235 return result;
236 }
237
238
239 at::Tensor LazyNativeFunctions::_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
240
241 if (force_eager_fallback(at::aten::_softmax)) {
242 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_softmax)>::call(
243 self,
244 dim,
245 half_to_float
246 );
247 }
248
249 TORCH_LAZY_FN_COUNTER("lazy::");
250 auto common_device = torch::lazy::GetBackendDevice(self);
251 TORCH_INTERNAL_ASSERT(common_device);
252
253 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
254 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Softmax>(lazy_self->GetIrValue(), dim, half_to_float);
255 if (!node) {
256 auto self_meta = to_meta(self);
257 auto out_meta = at::meta::_softmax(self_meta, dim, half_to_float);
258
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
260 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
261 if(torch::lazy::symbolicShapeEnabled()){
262 std::vector<torch::jit::IValue> inputs = { self, dim, half_to_float };
263 const char* schema_str = "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor";
264 applySymbolicShapesOnLT(schema_str, inputs, shapes);
265 }
266
267 node = torch::lazy::MakeNode<Softmax>(lazy_self->GetIrValue(), dim, half_to_float, std::move(shapes));
268 CacheNode(node);
269 }
270
271 auto result = torch::lazy::CreateAtenFromLtcTensor(
272 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
273 return result;
274 }
275
276
277 at::Tensor LazyNativeFunctions::_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
278
279 if (force_eager_fallback(at::aten::_softmax_backward_data)) {
280 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_softmax_backward_data)>::call(
281 grad_output,
282 output,
283 dim,
284 input_dtype
285 );
286 }
287
288 TORCH_LAZY_FN_COUNTER("lazy::");
289 auto common_device = torch::lazy::GetBackendDevice(grad_output, output);
290 TORCH_INTERNAL_ASSERT(common_device);
291
292 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
293 LazyTensorPtr lazy_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(output, *common_device);
294 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SoftmaxBackwardData>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), dim, input_dtype);
295 if (!node) {
296 auto grad_output_meta = to_meta(grad_output);
297 auto output_meta = to_meta(output);
298 auto out_meta = at::meta::_softmax_backward_data(grad_output_meta, output_meta, dim, input_dtype);
299
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
301 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
302 if(torch::lazy::symbolicShapeEnabled()){
303 std::vector<torch::jit::IValue> inputs = { grad_output, output, dim, input_dtype };
304 const char* schema_str = "aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor";
305 applySymbolicShapesOnLT(schema_str, inputs, shapes);
306 }
307
308 node = torch::lazy::MakeNode<SoftmaxBackwardData>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), dim, input_dtype, std::move(shapes));
309 CacheNode(node);
310 }
311
312 auto result = torch::lazy::CreateAtenFromLtcTensor(
313 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
314 return result;
315 }
316
317
318 at::Tensor LazyNativeFunctions::abs(const at::Tensor & self) {
319
320 if (force_eager_fallback(at::aten::abs)) {
321 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(abs)>::call(
322 self
323 );
324 }
325
326 TORCH_LAZY_FN_COUNTER("lazy::");
327 auto common_device = torch::lazy::GetBackendDevice(self);
328 TORCH_INTERNAL_ASSERT(common_device);
329
330 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
331 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Abs>(lazy_self->GetIrValue());
332 if (!node) {
333
334 auto shapes = torch::lazy::compute_shape_abs(self);
335 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
336 if(torch::lazy::symbolicShapeEnabled()){
337 std::vector<torch::jit::IValue> inputs = { self };
338 const char* schema_str = "aten::abs(Tensor self) -> Tensor";
339 applySymbolicShapesOnLT(schema_str, inputs, shapes);
340 }
341
342 node = torch::lazy::MakeNode<Abs>(lazy_self->GetIrValue(), std::move(shapes));
343 CacheNode(node);
344 }
345
346 auto result = torch::lazy::CreateAtenFromLtcTensor(
347 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
348 return result;
349 }
350
351
352 at::Tensor LazyNativeFunctions::add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
353
354 if (force_eager_fallback(at::aten::add)) {
355 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(add, Tensor)>::call(
356 self,
357 other,
358 alpha
359 );
360 }
361
362 TORCH_LAZY_FN_COUNTER("lazy::");
363 auto common_device = torch::lazy::GetBackendDevice(self, other);
364 TORCH_INTERNAL_ASSERT(common_device);
365
366 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
367 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
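        // Scalar arguments are materialized as IR values on the common backend device.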
368 auto node_alpha = torch::lazy::LazyGraphExecutor::Get()->
369 GetIrValueForScalarFromCodegen(alpha, *common_device);
370 torch::lazy::NodePtr node = torch::lazy::ReuseNode<AddTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), node_alpha);
371 if (!node) {
372 auto self_meta = to_meta(self);
373 auto other_meta = to_meta(other);
374 auto out_meta = at::meta::add(self_meta, other_meta, alpha);
375
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
377 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
378 if(torch::lazy::symbolicShapeEnabled()){
379 std::vector<torch::jit::IValue> inputs = { self, other, alpha };
380 const char* schema_str = "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor";
381 applySymbolicShapesOnLT(schema_str, inputs, shapes);
382 }
383
384 node = torch::lazy::MakeNode<AddTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), node_alpha, std::move(shapes));
385 CacheNode(node);
386 }
387
388 auto result = torch::lazy::CreateAtenFromLtcTensor(
389 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
390 return result;
391 }
392
393
394 at::Tensor LazyNativeFunctions::addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
395
396 if (force_eager_fallback(at::aten::addcdiv)) {
397 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(addcdiv)>::call(
398 self,
399 tensor1,
400 tensor2,
401 value
402 );
403 }
404
405 TORCH_LAZY_FN_COUNTER("lazy::");
406 auto common_device = torch::lazy::GetBackendDevice(self, tensor1, tensor2);
407 TORCH_INTERNAL_ASSERT(common_device);
408
409 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
410 LazyTensorPtr lazy_tensor1 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(tensor1, *common_device);
411 LazyTensorPtr lazy_tensor2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(tensor2, *common_device);
412 auto node_value = torch::lazy::LazyGraphExecutor::Get()->
413 GetIrValueForScalarFromCodegen(value, *common_device);
414 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Addcdiv>(lazy_self->GetIrValue(), lazy_tensor1->GetIrValue(), lazy_tensor2->GetIrValue(), node_value);
415 if (!node) {
416 auto self_meta = to_meta(self);
417 auto tensor1_meta = to_meta(tensor1);
418 auto tensor2_meta = to_meta(tensor2);
419 auto out_meta = at::meta::addcdiv(self_meta, tensor1_meta, tensor2_meta, value);
420
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
422 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
423 if(torch::lazy::symbolicShapeEnabled()){
424 std::vector<torch::jit::IValue> inputs = { self, tensor1, tensor2, value };
425 const char* schema_str = "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor";
426 applySymbolicShapesOnLT(schema_str, inputs, shapes);
427 }
428
429 node = torch::lazy::MakeNode<Addcdiv>(lazy_self->GetIrValue(), lazy_tensor1->GetIrValue(), lazy_tensor2->GetIrValue(), node_value, std::move(shapes));
430 CacheNode(node);
431 }
432
433 auto result = torch::lazy::CreateAtenFromLtcTensor(
434 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
435 return result;
436 }
437
438
439 at::Tensor LazyNativeFunctions::addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
440
441 if (force_eager_fallback(at::aten::addcmul)) {
442 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(addcmul)>::call(
443 self,
444 tensor1,
445 tensor2,
446 value
447 );
448 }
449
450 TORCH_LAZY_FN_COUNTER("lazy::");
451 auto common_device = torch::lazy::GetBackendDevice(self, tensor1, tensor2);
452 TORCH_INTERNAL_ASSERT(common_device);
453
454 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
455 LazyTensorPtr lazy_tensor1 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(tensor1, *common_device);
456 LazyTensorPtr lazy_tensor2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(tensor2, *common_device);
457 auto node_value = torch::lazy::LazyGraphExecutor::Get()->
458 GetIrValueForScalarFromCodegen(value, *common_device);
459 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Addcmul>(lazy_self->GetIrValue(), lazy_tensor1->GetIrValue(), lazy_tensor2->GetIrValue(), node_value);
460 if (!node) {
461 auto self_meta = to_meta(self);
462 auto tensor1_meta = to_meta(tensor1);
463 auto tensor2_meta = to_meta(tensor2);
464 auto out_meta = at::meta::addcmul(self_meta, tensor1_meta, tensor2_meta, value);
465
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
467 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
468 if(torch::lazy::symbolicShapeEnabled()){
469 std::vector<torch::jit::IValue> inputs = { self, tensor1, tensor2, value };
470 const char* schema_str = "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor";
471 applySymbolicShapesOnLT(schema_str, inputs, shapes);
472 }
473
474 node = torch::lazy::MakeNode<Addcmul>(lazy_self->GetIrValue(), lazy_tensor1->GetIrValue(), lazy_tensor2->GetIrValue(), node_value, std::move(shapes));
475 CacheNode(node);
476 }
477
478 auto result = torch::lazy::CreateAtenFromLtcTensor(
479 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
480 return result;
481 }
482
483
484 at::Tensor LazyNativeFunctions::addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
485
486 if (force_eager_fallback(at::aten::addmm)) {
487 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(addmm)>::call(
488 self,
489 mat1,
490 mat2,
491 beta,
492 alpha
493 );
494 }
495
496 TORCH_LAZY_FN_COUNTER("lazy::");
497 auto common_device = torch::lazy::GetBackendDevice(self, mat1, mat2);
498 TORCH_INTERNAL_ASSERT(common_device);
499
500 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
501 LazyTensorPtr lazy_mat1 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mat1, *common_device);
502 LazyTensorPtr lazy_mat2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mat2, *common_device);
503 auto node_beta = torch::lazy::LazyGraphExecutor::Get()->
504 GetIrValueForScalarFromCodegen(beta, *common_device);
505 auto node_alpha = torch::lazy::LazyGraphExecutor::Get()->
506 GetIrValueForScalarFromCodegen(alpha, *common_device);
507 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Addmm>(lazy_self->GetIrValue(), lazy_mat1->GetIrValue(), lazy_mat2->GetIrValue(), node_beta, node_alpha);
508 if (!node) {
509 auto self_meta = to_meta(self);
510 auto mat1_meta = to_meta(mat1);
511 auto mat2_meta = to_meta(mat2);
512 auto out_meta = at::meta::addmm(self_meta, mat1_meta, mat2_meta, beta, alpha);
513
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
515 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
516 if(torch::lazy::symbolicShapeEnabled()){
517 std::vector<torch::jit::IValue> inputs = { self, mat1, mat2, beta, alpha };
518 const char* schema_str = "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor";
519 applySymbolicShapesOnLT(schema_str, inputs, shapes);
520 }
521
522 node = torch::lazy::MakeNode<Addmm>(lazy_self->GetIrValue(), lazy_mat1->GetIrValue(), lazy_mat2->GetIrValue(), node_beta, node_alpha, std::move(shapes));
523 CacheNode(node);
524 }
525
526 auto result = torch::lazy::CreateAtenFromLtcTensor(
527 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
528 return result;
529 }
530
531
532 at::Tensor LazyNativeFunctions::alias_copy(const at::Tensor & self) {
533
534 if (force_eager_fallback(at::aten::alias_copy)) {
535 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(alias_copy)>::call(
536 self
537 );
538 }
539
540 TORCH_LAZY_FN_COUNTER("lazy::");
541 auto common_device = torch::lazy::GetBackendDevice(self);
542 TORCH_INTERNAL_ASSERT(common_device);
543
544 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
545 torch::lazy::NodePtr node = torch::lazy::ReuseNode<AliasCopy>(lazy_self->GetIrValue());
546 if (!node) {
547 auto self_meta = to_meta(self);
548 auto out_meta = at::compositeexplicitautogradnonfunctional::alias_copy(self_meta);
549
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
551 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
552 if(torch::lazy::symbolicShapeEnabled()){
553 std::vector<torch::jit::IValue> inputs = { self };
554 const char* schema_str = "aten::alias_copy(Tensor self) -> Tensor";
555 applySymbolicShapesOnLT(schema_str, inputs, shapes);
556 }
557
558 node = torch::lazy::MakeNode<AliasCopy>(lazy_self->GetIrValue(), std::move(shapes));
559 CacheNode(node);
560 }
561
562 auto result = torch::lazy::CreateAtenFromLtcTensor(
563 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
564 return result;
565 }
566
567
568 at::Tensor LazyNativeFunctions::all(const at::Tensor & self) {
569
570 if (force_eager_fallback(at::aten::all)) {
571 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(all)>::call(
572 self
573 );
574 }
575
576 TORCH_LAZY_FN_COUNTER("lazy::");
577 auto common_device = torch::lazy::GetBackendDevice(self);
578 TORCH_INTERNAL_ASSERT(common_device);
579
580 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
581 torch::lazy::NodePtr node = torch::lazy::ReuseNode<All>(lazy_self->GetIrValue());
582 if (!node) {
583 auto self_meta = to_meta(self);
584 auto out_meta = at::meta::all(self_meta);
585
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
587 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
588 if(torch::lazy::symbolicShapeEnabled()){
589 std::vector<torch::jit::IValue> inputs = { self };
590 const char* schema_str = "aten::all(Tensor self) -> Tensor";
591 applySymbolicShapesOnLT(schema_str, inputs, shapes);
592 }
593
594 node = torch::lazy::MakeNode<All>(lazy_self->GetIrValue(), std::move(shapes));
595 CacheNode(node);
596 }
597
598 auto result = torch::lazy::CreateAtenFromLtcTensor(
599 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
600 return result;
601 }
602
603
604 at::Tensor LazyNativeFunctions::any(const at::Tensor & self) {
605
606 if (force_eager_fallback(at::aten::any)) {
607 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(any)>::call(
608 self
609 );
610 }
611
612 TORCH_LAZY_FN_COUNTER("lazy::");
613 auto common_device = torch::lazy::GetBackendDevice(self);
614 TORCH_INTERNAL_ASSERT(common_device);
615
616 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
617 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Any>(lazy_self->GetIrValue());
618 if (!node) {
619 auto self_meta = to_meta(self);
620 auto out_meta = at::meta::any(self_meta);
621
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
623 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
624 if(torch::lazy::symbolicShapeEnabled()){
625 std::vector<torch::jit::IValue> inputs = { self };
626 const char* schema_str = "aten::any(Tensor self) -> Tensor";
627 applySymbolicShapesOnLT(schema_str, inputs, shapes);
628 }
629
630 node = torch::lazy::MakeNode<Any>(lazy_self->GetIrValue(), std::move(shapes));
631 CacheNode(node);
632 }
633
634 auto result = torch::lazy::CreateAtenFromLtcTensor(
635 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
636 return result;
637 }
638
639
640 at::Tensor & LazyNativeFunctions::arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
641
642 if (force_eager_fallback(at::aten::arange)) {
643 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(arange, start_out)>::call(
644 start,
645 end,
646 step,
647 out
648 );
649 }
650
651 TORCH_LAZY_FN_COUNTER("lazy::");
652 auto common_device = torch::lazy::GetBackendDevice(out);
653 TORCH_INTERNAL_ASSERT(common_device);
654
655 auto node_start = torch::lazy::LazyGraphExecutor::Get()->
656 GetIrValueForScalarFromCodegen(start, *common_device);
657 auto node_end = torch::lazy::LazyGraphExecutor::Get()->
658 GetIrValueForScalarFromCodegen(end, *common_device);
659 auto node_step = torch::lazy::LazyGraphExecutor::Get()->
660 GetIrValueForScalarFromCodegen(step, *common_device);
661 LazyTensorPtr lazy_out = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(out, *common_device);
662 torch::lazy::NodePtr node = torch::lazy::ReuseNode<ArangeStartOut>(node_start, node_end, node_step, lazy_out->GetIrValue());
663 if (!node) {
664
665 auto shapes = torch::lazy::compute_shape_arange_out(start, end, step, out);
666 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
667 if(torch::lazy::symbolicShapeEnabled()){
668 std::vector<torch::jit::IValue> inputs = { start, end, step, out };
669 const char* schema_str = "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)";
670 applySymbolicShapesOnLT(schema_str, inputs, shapes);
671 }
672
673 node = torch::lazy::MakeNode<ArangeStartOut>(node_start, node_end, node_step, lazy_out->GetIrValue(), std::move(shapes));
674 CacheNode(node);
675 }
676
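        // out= variant: install the new IR value in place on the existing lazy
        // tensor instead of creating a new one.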
677 lazy_out->SetInPlaceIrValue(node);
678 auto& result = out;
679 return result;
680 }
681
682
683 at::Tensor LazyNativeFunctions::as_strided_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
684
685 if (force_eager_fallback(at::aten::as_strided_copy)) {
686 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(as_strided_copy)>::call(
687 self,
688 size,
689 stride,
690 storage_offset
691 );
692 }
693
694 TORCH_LAZY_FN_COUNTER("lazy::");
695 auto common_device = torch::lazy::GetBackendDevice(self, storage_offset);
696 TORCH_INTERNAL_ASSERT(common_device);
697
698 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
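        // SymInt arguments are converted via GetSymIntArrayRefValue / GetSymIntValue
        // before being stored on the IR node.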
699 torch::lazy::NodePtr node = torch::lazy::ReuseNode<AsStridedCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), storage_offset ? c10::make_optional(GetSymIntValue(*storage_offset)) : c10::nullopt);
700 if (!node) {
701 auto self_meta = to_meta(self);
702 auto out_meta = at::compositeexplicitautogradnonfunctional::as_strided_copy_symint(self_meta, size, stride, storage_offset);
703
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
705 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
706 if(torch::lazy::symbolicShapeEnabled()){
707 std::vector<torch::jit::IValue> inputs = { self, size, stride, storage_offset };
708 const char* schema_str = "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor";
709 applySymbolicShapesOnLT(schema_str, inputs, shapes);
710 }
711
712 node = torch::lazy::MakeNode<AsStridedCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), storage_offset ? c10::make_optional(GetSymIntValue(*storage_offset)) : c10::nullopt, std::move(shapes));
713 CacheNode(node);
714 }
715
716 auto result = torch::lazy::CreateAtenFromLtcTensor(
717 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
718 return result;
719 }
720
721
722 at::Tensor LazyNativeFunctions::as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
723
724 if (force_eager_fallback(at::aten::as_strided_scatter)) {
725 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(as_strided_scatter)>::call(
726 self,
727 src,
728 size,
729 stride,
730 storage_offset
731 );
732 }
733
734 TORCH_LAZY_FN_COUNTER("lazy::");
735 auto common_device = torch::lazy::GetBackendDevice(self, src, storage_offset);
736 TORCH_INTERNAL_ASSERT(common_device);
737
738 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
739 LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *common_device);
740 torch::lazy::NodePtr node = torch::lazy::ReuseNode<AsStridedScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), storage_offset ? c10::make_optional(GetSymIntValue(*storage_offset)) : c10::nullopt);
741 if (!node) {
742
743 auto shapes = torch::lazy::compute_shape_as_strided_scatter_symint(self, src, size, stride, storage_offset);
744 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
745 if(torch::lazy::symbolicShapeEnabled()){
746 std::vector<torch::jit::IValue> inputs = { self, src, size, stride, storage_offset };
747 const char* schema_str = "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor";
748 applySymbolicShapesOnLT(schema_str, inputs, shapes);
749 }
750
751 node = torch::lazy::MakeNode<AsStridedScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), storage_offset ? c10::make_optional(GetSymIntValue(*storage_offset)) : c10::nullopt, std::move(shapes));
752 CacheNode(node);
753 }
754
755 auto result = torch::lazy::CreateAtenFromLtcTensor(
756 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
757 return result;
758 }
759
760
761 at::Tensor LazyNativeFunctions::avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
762
763 if (force_eager_fallback(at::aten::avg_pool2d)) {
764 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(avg_pool2d)>::call(
765 self,
766 kernel_size,
767 stride,
768 padding,
769 ceil_mode,
770 count_include_pad,
771 divisor_override
772 );
773 }
774
775 TORCH_LAZY_FN_COUNTER("lazy::");
776 auto common_device = torch::lazy::GetBackendDevice(self);
777 TORCH_INTERNAL_ASSERT(common_device);
778
779 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
780 torch::lazy::NodePtr node = torch::lazy::ReuseNode<AvgPool2d>(lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), ceil_mode, count_include_pad, divisor_override);
781 if (!node) {
782 auto self_meta = to_meta(self);
783 auto out_meta = at::meta::avg_pool2d(self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
784
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
786 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
787 if(torch::lazy::symbolicShapeEnabled()){
788 std::vector<torch::jit::IValue> inputs = { self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override };
789 const char* schema_str = "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor";
790 applySymbolicShapesOnLT(schema_str, inputs, shapes);
791 }
792
793 node = torch::lazy::MakeNode<AvgPool2d>(lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), ceil_mode, count_include_pad, divisor_override, std::move(shapes));
794 CacheNode(node);
795 }
796
797 auto result = torch::lazy::CreateAtenFromLtcTensor(
798 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
799 return result;
800 }
801
802
803 at::Tensor LazyNativeFunctions::avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
804
805 if (force_eager_fallback(at::aten::avg_pool2d_backward)) {
806 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(avg_pool2d_backward)>::call(
807 grad_output,
808 self,
809 kernel_size,
810 stride,
811 padding,
812 ceil_mode,
813 count_include_pad,
814 divisor_override
815 );
816 }
817
818 TORCH_LAZY_FN_COUNTER("lazy::");
819 auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
820 TORCH_INTERNAL_ASSERT(common_device);
821
822 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
823 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
824 torch::lazy::NodePtr node = torch::lazy::ReuseNode<AvgPool2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), ceil_mode, count_include_pad, divisor_override);
825 if (!node) {
826 auto grad_output_meta = to_meta(grad_output);
827 auto self_meta = to_meta(self);
828 auto out_meta = at::meta::avg_pool2d_backward(grad_output_meta, self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
829
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
831 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
832 if(torch::lazy::symbolicShapeEnabled()){
833 std::vector<torch::jit::IValue> inputs = { grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override };
834 const char* schema_str = "aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor";
835 applySymbolicShapesOnLT(schema_str, inputs, shapes);
836 }
837
838 node = torch::lazy::MakeNode<AvgPool2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), ceil_mode, count_include_pad, divisor_override, std::move(shapes));
839 CacheNode(node);
840 }
841
842 auto result = torch::lazy::CreateAtenFromLtcTensor(
843 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
844 return result;
845 }
846
847
848 at::Tensor LazyNativeFunctions::baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
849
850 if (force_eager_fallback(at::aten::baddbmm)) {
851 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(baddbmm)>::call(
852 self,
853 batch1,
854 batch2,
855 beta,
856 alpha
857 );
858 }
859
860 TORCH_LAZY_FN_COUNTER("lazy::");
861 auto common_device = torch::lazy::GetBackendDevice(self, batch1, batch2);
862 TORCH_INTERNAL_ASSERT(common_device);
863
864 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
865 LazyTensorPtr lazy_batch1 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(batch1, *common_device);
866 LazyTensorPtr lazy_batch2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(batch2, *common_device);
867 auto node_beta = torch::lazy::LazyGraphExecutor::Get()->
868 GetIrValueForScalarFromCodegen(beta, *common_device);
869 auto node_alpha = torch::lazy::LazyGraphExecutor::Get()->
870 GetIrValueForScalarFromCodegen(alpha, *common_device);
871 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Baddbmm>(lazy_self->GetIrValue(), lazy_batch1->GetIrValue(), lazy_batch2->GetIrValue(), node_beta, node_alpha);
872 if (!node) {
873 auto self_meta = to_meta(self);
874 auto batch1_meta = to_meta(batch1);
875 auto batch2_meta = to_meta(batch2);
876 auto out_meta = at::meta::baddbmm(self_meta, batch1_meta, batch2_meta, beta, alpha);
877
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
879 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
880 if(torch::lazy::symbolicShapeEnabled()){
881 std::vector<torch::jit::IValue> inputs = { self, batch1, batch2, beta, alpha };
882 const char* schema_str = "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor";
883 applySymbolicShapesOnLT(schema_str, inputs, shapes);
884 }
885
886 node = torch::lazy::MakeNode<Baddbmm>(lazy_self->GetIrValue(), lazy_batch1->GetIrValue(), lazy_batch2->GetIrValue(), node_beta, node_alpha, std::move(shapes));
887 CacheNode(node);
888 }
889
890 auto result = torch::lazy::CreateAtenFromLtcTensor(
891 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
892 return result;
893 }
894
895
896 at::Tensor LazyNativeFunctions::bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator) {
897
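        // A defined generator also forces the eager fallback, presumably because
        // generator state cannot be captured in the lazy IR.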
898 if (force_eager_fallback(at::aten::bernoulli) || (generator.has_value() && generator->defined())) {
899 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(bernoulli)>::call(
900 self,
901 generator
902 );
903 }
904
905 TORCH_LAZY_FN_COUNTER("lazy::");
906 auto common_device = torch::lazy::GetBackendDevice(self);
907 TORCH_INTERNAL_ASSERT(common_device);
908
909 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
910 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Bernoulli>(lazy_self->GetIrValue());
911 if (!node) {
912
913 auto shapes = torch::lazy::compute_shape_bernoulli(self, generator);
914 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
915 if(torch::lazy::symbolicShapeEnabled()){
916 std::vector<torch::jit::IValue> inputs = { self };
917 const char* schema_str = "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor";
918 applySymbolicShapesOnLT(schema_str, inputs, shapes);
919 }
920
921 node = torch::lazy::MakeNode<Bernoulli>(lazy_self->GetIrValue(), std::move(shapes));
922 CacheNode(node);
923 }
924
925 auto result = torch::lazy::CreateAtenFromLtcTensor(
926 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
927 return result;
928 }
929
930
931 at::Tensor LazyNativeFunctions::bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
932
933 if (force_eager_fallback(at::aten::bernoulli) || (generator.has_value() && generator->defined())) {
934 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(bernoulli, p)>::call(
935 self,
936 p,
937 generator
938 );
939 }
940
941 TORCH_LAZY_FN_COUNTER("lazy::");
942 auto common_device = torch::lazy::GetBackendDevice(self);
943 TORCH_INTERNAL_ASSERT(common_device);
944
945 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
946 torch::lazy::NodePtr node = torch::lazy::ReuseNode<BernoulliP>(lazy_self->GetIrValue(), p);
947 if (!node) {
948
949 auto shapes = torch::lazy::compute_shape_bernoulli(self, p, generator);
950 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
951 if(torch::lazy::symbolicShapeEnabled()){
952 std::vector<torch::jit::IValue> inputs = { self, p };
953 const char* schema_str = "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor";
954 applySymbolicShapesOnLT(schema_str, inputs, shapes);
955 }
956
957 node = torch::lazy::MakeNode<BernoulliP>(lazy_self->GetIrValue(), p, std::move(shapes));
958 CacheNode(node);
959 }
960
961 auto result = torch::lazy::CreateAtenFromLtcTensor(
962 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
963 return result;
964 }
965
966
967 at::Tensor LazyNativeFunctions::binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
968
969 if (force_eager_fallback(at::aten::binary_cross_entropy)) {
970 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(binary_cross_entropy)>::call(
971 self,
972 target,
973 weight,
974 reduction
975 );
976 }
977
978 TORCH_LAZY_FN_COUNTER("lazy::");
979 auto common_device = torch::lazy::GetBackendDevice(self, target, weight);
980 TORCH_INTERNAL_ASSERT(common_device);
981
982 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
983 LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
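        // Optional tensor arguments: TryGetLtcTensor returns a null LazyTensorPtr
        // when the optional is absent, which maps to c10::nullopt below.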
984 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
985 torch::lazy::NodePtr node = torch::lazy::ReuseNode<BinaryCrossEntropy>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction);
986 if (!node) {
987
988 auto shapes = torch::lazy::compute_shape_binary_cross_entropy(self, target, weight, reduction);
989 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
990 if(torch::lazy::symbolicShapeEnabled()){
991 std::vector<torch::jit::IValue> inputs = { self, target, weight, reduction };
992 const char* schema_str = "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor";
993 applySymbolicShapesOnLT(schema_str, inputs, shapes);
994 }
995
996 node = torch::lazy::MakeNode<BinaryCrossEntropy>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, std::move(shapes));
997 CacheNode(node);
998 }
999
1000 auto result = torch::lazy::CreateAtenFromLtcTensor(
1001 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1002 return result;
1003 }
1004
1005
1006 at::Tensor LazyNativeFunctions::binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
1007
1008 if (force_eager_fallback(at::aten::binary_cross_entropy_backward)) {
1009 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(binary_cross_entropy_backward)>::call(
1010 grad_output,
1011 self,
1012 target,
1013 weight,
1014 reduction
1015 );
1016 }
1017
1018 TORCH_LAZY_FN_COUNTER("lazy::");
1019 auto common_device = torch::lazy::GetBackendDevice(grad_output, self, target, weight);
1020 TORCH_INTERNAL_ASSERT(common_device);
1021
1022 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
1023 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1024 LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
1025 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
1026 torch::lazy::NodePtr node = torch::lazy::ReuseNode<BinaryCrossEntropyBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction);
1027 if (!node) {
1028
1029 auto shapes = torch::lazy::compute_shape_binary_cross_entropy_backward(grad_output, self, target, weight, reduction);
1030 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1031 if(torch::lazy::symbolicShapeEnabled()){
1032 std::vector<torch::jit::IValue> inputs = { grad_output, self, target, weight, reduction };
1033 const char* schema_str = "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor";
1034 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1035 }
1036
1037 node = torch::lazy::MakeNode<BinaryCrossEntropyBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, std::move(shapes));
1038 CacheNode(node);
1039 }
1040
1041 auto result = torch::lazy::CreateAtenFromLtcTensor(
1042 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1043 return result;
1044 }
1045
1046
1047 at::Tensor LazyNativeFunctions::bitwise_and(const at::Tensor & self, const at::Tensor & other) {
1048
1049 if (force_eager_fallback(at::aten::bitwise_and)) {
1050 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(bitwise_and, Tensor)>::call(
1051 self,
1052 other
1053 );
1054 }
1055
1056 TORCH_LAZY_FN_COUNTER("lazy::");
1057 auto common_device = torch::lazy::GetBackendDevice(self, other);
1058 TORCH_INTERNAL_ASSERT(common_device);
1059
1060 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1061 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
1062 torch::lazy::NodePtr node = torch::lazy::ReuseNode<BitwiseAndTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
1063 if (!node) {
1064 auto self_meta = to_meta(self);
1065 auto other_meta = to_meta(other);
1066 auto out_meta = at::meta::bitwise_and(self_meta, other_meta);
1067
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1069 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1070 if(torch::lazy::symbolicShapeEnabled()){
1071 std::vector<torch::jit::IValue> inputs = { self, other };
1072 const char* schema_str = "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor";
1073 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1074 }
1075
1076 node = torch::lazy::MakeNode<BitwiseAndTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
1077 CacheNode(node);
1078 }
1079
1080 auto result = torch::lazy::CreateAtenFromLtcTensor(
1081 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1082 return result;
1083 }
1084
1085
1086 at::Tensor LazyNativeFunctions::bitwise_or(const at::Tensor & self, const at::Tensor & other) {
1087
1088 if (force_eager_fallback(at::aten::bitwise_or)) {
1089 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(bitwise_or, Tensor)>::call(
1090 self,
1091 other
1092 );
1093 }
1094
1095 TORCH_LAZY_FN_COUNTER("lazy::");
1096 auto common_device = torch::lazy::GetBackendDevice(self, other);
1097 TORCH_INTERNAL_ASSERT(common_device);
1098
1099 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1100 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
1101 torch::lazy::NodePtr node = torch::lazy::ReuseNode<BitwiseOrTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
1102 if (!node) {
1103 auto self_meta = to_meta(self);
1104 auto other_meta = to_meta(other);
1105 auto out_meta = at::meta::bitwise_or(self_meta, other_meta);
1106
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1108 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1109 if(torch::lazy::symbolicShapeEnabled()){
1110 std::vector<torch::jit::IValue> inputs = { self, other };
1111 const char* schema_str = "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor";
1112 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1113 }
1114
1115 node = torch::lazy::MakeNode<BitwiseOrTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
1116 CacheNode(node);
1117 }
1118
1119 auto result = torch::lazy::CreateAtenFromLtcTensor(
1120 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1121 return result;
1122 }
1123
1124
1125 at::Tensor LazyNativeFunctions::bmm(const at::Tensor & self, const at::Tensor & mat2) {
1126
1127 if (force_eager_fallback(at::aten::bmm)) {
1128 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(bmm)>::call(
1129 self,
1130 mat2
1131 );
1132 }
1133
1134 TORCH_LAZY_FN_COUNTER("lazy::");
1135 auto common_device = torch::lazy::GetBackendDevice(self, mat2);
1136 TORCH_INTERNAL_ASSERT(common_device);
1137
1138 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1139 LazyTensorPtr lazy_mat2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mat2, *common_device);
1140 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Bmm>(lazy_self->GetIrValue(), lazy_mat2->GetIrValue());
1141 if (!node) {
1142 auto self_meta = to_meta(self);
1143 auto mat2_meta = to_meta(mat2);
1144 auto out_meta = at::meta::bmm(self_meta, mat2_meta);
1145
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1147 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1148 if(torch::lazy::symbolicShapeEnabled()){
1149 std::vector<torch::jit::IValue> inputs = { self, mat2 };
1150 const char* schema_str = "aten::bmm(Tensor self, Tensor mat2) -> Tensor";
1151 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1152 }
1153
1154 node = torch::lazy::MakeNode<Bmm>(lazy_self->GetIrValue(), lazy_mat2->GetIrValue(), std::move(shapes));
1155 CacheNode(node);
1156 }
1157
1158 auto result = torch::lazy::CreateAtenFromLtcTensor(
1159 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1160 return result;
1161 }
1162
1163
1164 at::Tensor LazyNativeFunctions::cat(const at::ITensorListRef & tensors, int64_t dim) {
1165
1166 if (force_eager_fallback(at::aten::cat)) {
1167 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(cat)>::call(
1168 tensors,
1169 dim
1170 );
1171 }
1172
1173 TORCH_LAZY_FN_COUNTER("lazy::");
1174 auto common_device = torch::lazy::GetBackendDevice(tensors);
1175 TORCH_INTERNAL_ASSERT(common_device);
1176
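        // TensorList arguments are converted as a whole via GetTensorList rather
        // than being wrapped tensor-by-tensor.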
1177 auto lazy_tensors_tensorlist = torch::lazy::GetTensorList(tensors);
1178 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Cat>(lazy_tensors_tensorlist, dim);
1179 if (!node) {
1180 auto tensors_meta = to_meta(tensors);
1181 auto out_meta = at::meta::cat(tensors_meta, dim);
1182
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1184 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1185 if(torch::lazy::symbolicShapeEnabled()){
1186 std::vector<torch::jit::IValue> inputs = { tensors, dim };
1187 const char* schema_str = "aten::cat(Tensor[] tensors, int dim=0) -> Tensor";
1188 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1189 }
1190
1191 node = torch::lazy::MakeNode<Cat>(lazy_tensors_tensorlist, dim, std::move(shapes));
1192 CacheNode(node);
1193 }
1194
1195 auto result = torch::lazy::CreateAtenFromLtcTensor(
1196 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1197 return result;
1198 }
1199
1200
1201 at::Tensor LazyNativeFunctions::clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
1202
1203 if (force_eager_fallback(at::aten::clamp)) {
1204 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(clamp)>::call(
1205 self,
1206 min,
1207 max
1208 );
1209 }
1210
1211 TORCH_LAZY_FN_COUNTER("lazy::");
1212 auto common_device = torch::lazy::GetBackendDevice(self);
1213 TORCH_INTERNAL_ASSERT(common_device);
1214
1215 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
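        // Optional scalar arguments become optional IR values (c10::nullopt when absent).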
1216 auto node_min = min ?
1217 c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
1218 GetIrValueForScalarFromCodegen(*min, *common_device)):
1219 c10::nullopt;
1220 auto node_max = max ?
1221 c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
1222 GetIrValueForScalarFromCodegen(*max, *common_device)):
1223 c10::nullopt;
1224 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Clamp>(lazy_self->GetIrValue(), node_min, node_max);
1225 if (!node) {
1226 auto self_meta = to_meta(self);
1227 auto out_meta = at::meta::clamp(self_meta, min, max);
1228
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1230 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1231 if(torch::lazy::symbolicShapeEnabled()){
1232 std::vector<torch::jit::IValue> inputs = { self, min, max };
1233 const char* schema_str = "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor";
1234 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1235 }
1236
1237 node = torch::lazy::MakeNode<Clamp>(lazy_self->GetIrValue(), node_min, node_max, std::move(shapes));
1238 CacheNode(node);
1239 }
1240
1241 auto result = torch::lazy::CreateAtenFromLtcTensor(
1242 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1243 return result;
1244 }
1245
1246
1247 at::Tensor LazyNativeFunctions::clamp_min(const at::Tensor & self, const at::Scalar & min) {
1248
1249 if (force_eager_fallback(at::aten::clamp_min)) {
1250 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(clamp_min)>::call(
1251 self,
1252 min
1253 );
1254 }
1255
1256 TORCH_LAZY_FN_COUNTER("lazy::");
1257 auto common_device = torch::lazy::GetBackendDevice(self);
1258 TORCH_INTERNAL_ASSERT(common_device);
1259
1260 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1261 auto node_min = torch::lazy::LazyGraphExecutor::Get()->
1262 GetIrValueForScalarFromCodegen(min, *common_device);
1263 torch::lazy::NodePtr node = torch::lazy::ReuseNode<ClampMin>(lazy_self->GetIrValue(), node_min);
1264 if (!node) {
1265 auto self_meta = to_meta(self);
1266 auto out_meta = at::meta::clamp_min(self_meta, min);
1267
1268      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1269 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1270 if(torch::lazy::symbolicShapeEnabled()){
1271 std::vector<torch::jit::IValue> inputs = { self, min };
1272 const char* schema_str = "aten::clamp_min(Tensor self, Scalar min) -> Tensor";
1273 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1274 }
1275
1276 node = torch::lazy::MakeNode<ClampMin>(lazy_self->GetIrValue(), node_min, std::move(shapes));
1277 CacheNode(node);
1278 }
1279
1280 auto result = torch::lazy::CreateAtenFromLtcTensor(
1281 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1282 return result;
1283 }
1284
1285
1286 at::Tensor LazyNativeFunctions::constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value) {
1287
1288 if (force_eager_fallback(at::aten::constant_pad_nd)) {
1289 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(constant_pad_nd)>::call(
1290 self,
1291 c10::fromIntArrayRefSlow(pad),
1292 value
1293 );
1294 }
1295
1296 TORCH_LAZY_FN_COUNTER("lazy::");
1297 auto common_device = torch::lazy::GetBackendDevice(self);
1298 TORCH_INTERNAL_ASSERT(common_device);
1299
1300 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1301 auto node_value = torch::lazy::LazyGraphExecutor::Get()->
1302 GetIrValueForScalarFromCodegen(value, *common_device);
1303 torch::lazy::NodePtr node = torch::lazy::ReuseNode<ConstantPadNd>(lazy_self->GetIrValue(), std::vector<int64_t>(pad.begin(), pad.end()), node_value);
1304 if (!node) {
1305
1306 auto shapes = torch::lazy::compute_shape_constant_pad_nd(self, pad, value);
1307 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1308 if(torch::lazy::symbolicShapeEnabled()){
1309 std::vector<torch::jit::IValue> inputs = { self, pad, value };
1310 const char* schema_str = "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor";
1311 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1312 }
1313
1314 node = torch::lazy::MakeNode<ConstantPadNd>(lazy_self->GetIrValue(), std::vector<int64_t>(pad.begin(), pad.end()), node_value, std::move(shapes));
1315 CacheNode(node);
1316 }
1317
1318 auto result = torch::lazy::CreateAtenFromLtcTensor(
1319 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1320 return result;
1321 }
1322
1323
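    // Some ops compute their output shape with a hand-written compute_shape_*
    // helper from torch/csrc/lazy/core/shape_inference.h instead of running a
    // meta kernel; constant_pad_nd below is one of them.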
1324 at::Tensor LazyNativeFunctions::convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
1325
1326 if (force_eager_fallback(at::aten::convolution)) {
1327 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(convolution)>::call(
1328 input,
1329 weight,
1330 bias,
1331 stride,
1332 c10::fromIntArrayRefSlow(padding),
1333 dilation,
1334 transposed,
1335 c10::fromIntArrayRefSlow(output_padding),
1336 groups
1337 );
1338 }
1339
1340 TORCH_LAZY_FN_COUNTER("lazy::");
1341 auto common_device = torch::lazy::GetBackendDevice(input, weight, bias);
1342 TORCH_INTERNAL_ASSERT(common_device);
1343
1344 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
1345 LazyTensorPtr lazy_weight = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(weight, *common_device);
1346 LazyTensorPtr lazy_bias = torch::lazy::TryGetLtcTensor(bias.value_or(at::Tensor()));
1347 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Convolution>(lazy_input->GetIrValue(), lazy_weight->GetIrValue(), lazy_bias ? c10::make_optional(lazy_bias->GetIrValue()) : c10::nullopt, std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), transposed, std::vector<int64_t>(output_padding.begin(), output_padding.end()), groups);
1348 if (!node) {
1349
1350 auto shapes = torch::lazy::compute_shape_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
1351 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1352 if(torch::lazy::symbolicShapeEnabled()){
1353 std::vector<torch::jit::IValue> inputs = { input, weight, bias, stride, padding, dilation, transposed, output_padding, groups };
1354 const char* schema_str = "aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor";
1355 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1356 }
1357
1358 node = torch::lazy::MakeNode<Convolution>(lazy_input->GetIrValue(), lazy_weight->GetIrValue(), lazy_bias ? c10::make_optional(lazy_bias->GetIrValue()) : c10::nullopt, std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), transposed, std::vector<int64_t>(output_padding.begin(), output_padding.end()), groups, std::move(shapes));
1359 CacheNode(node);
1360 }
1361
1362 auto result = torch::lazy::CreateAtenFromLtcTensor(
1363 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1364 return result;
1365 }
1366
1367
1368 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1369
1370 if (force_eager_fallback(at::aten::convolution_backward)) {
1371 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(convolution_backward)>::call(
1372 grad_output,
1373 input,
1374 weight,
1375 bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt,
1376 stride,
1377 c10::fromIntArrayRefSlow(padding),
1378 dilation,
1379 transposed,
1380 c10::fromIntArrayRefSlow(output_padding),
1381 groups,
1382 output_mask
1383 );
1384 }
1385
1386 TORCH_LAZY_FN_COUNTER("lazy::");
1387 auto common_device = torch::lazy::GetBackendDevice(grad_output, input, weight);
1388 TORCH_INTERNAL_ASSERT(common_device);
1389
1390 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
1391 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
1392 LazyTensorPtr lazy_weight = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(weight, *common_device);
1393 torch::lazy::NodePtr node = torch::lazy::ReuseNode<ConvolutionBackward>(lazy_grad_output->GetIrValue(), lazy_input->GetIrValue(), lazy_weight->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(bias_sizes), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), transposed, std::vector<int64_t>(output_padding.begin(), output_padding.end()), groups, std::vector<bool>(output_mask.begin(), output_mask.end()));
1394 if (!node) {
1395
1396 auto shapes = torch::lazy::compute_shape_convolution_backward(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
1397 TORCH_INTERNAL_ASSERT(shapes.size() == 3);
1398 if(torch::lazy::symbolicShapeEnabled()){
1399 std::vector<torch::jit::IValue> inputs = { grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask };
1400 const char* schema_str = "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)";
1401 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1402 }
1403
1404 node = torch::lazy::MakeNode<ConvolutionBackward>(lazy_grad_output->GetIrValue(), lazy_input->GetIrValue(), lazy_weight->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(bias_sizes), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), transposed, std::vector<int64_t>(output_padding.begin(), output_padding.end()), groups, std::vector<bool>(output_mask.begin(), output_mask.end()), std::move(shapes));
1405 CacheNode(node);
1406 }
1407
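    // Multi-output op: materialize one LazyTensor per output index of the single
    // ConvolutionBackward node, then pack them into the std::tuple ATen expects.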
1408 std::vector<LazyTensorPtr> lazy_tensors;
1409 for (int i = 0; i < 3; i++) {
1410 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
1411 }
1412 auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
1413 return result;
1414 }
1415
1416
1417 at::Tensor LazyNativeFunctions::cos(const at::Tensor & self) {
1418
1419 if (force_eager_fallback(at::aten::cos)) {
1420 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(cos)>::call(
1421 self
1422 );
1423 }
1424
1425 TORCH_LAZY_FN_COUNTER("lazy::");
1426 auto common_device = torch::lazy::GetBackendDevice(self);
1427 TORCH_INTERNAL_ASSERT(common_device);
1428
1429 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1430 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Cos>(lazy_self->GetIrValue());
1431 if (!node) {
1432 auto self_meta = to_meta(self);
1433 auto out_meta = at::meta::cos(self_meta);
1434
1435      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1436 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1437 if(torch::lazy::symbolicShapeEnabled()){
1438 std::vector<torch::jit::IValue> inputs = { self };
1439 const char* schema_str = "aten::cos(Tensor self) -> Tensor";
1440 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1441 }
1442
1443 node = torch::lazy::MakeNode<Cos>(lazy_self->GetIrValue(), std::move(shapes));
1444 CacheNode(node);
1445 }
1446
1447 auto result = torch::lazy::CreateAtenFromLtcTensor(
1448 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1449 return result;
1450 }
1451
1452
1453 at::Tensor LazyNativeFunctions::cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
1454
1455 if (force_eager_fallback(at::aten::cumsum)) {
1456 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(cumsum)>::call(
1457 self,
1458 dim,
1459 dtype
1460 );
1461 }
1462
1463 TORCH_LAZY_FN_COUNTER("lazy::");
1464 auto common_device = torch::lazy::GetBackendDevice(self);
1465 TORCH_INTERNAL_ASSERT(common_device);
1466
1467 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1468 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Cumsum>(lazy_self->GetIrValue(), dim, dtype);
1469 if (!node) {
1470 auto self_meta = to_meta(self);
1471 auto out_meta = at::meta::cumsum(self_meta, dim, dtype);
1472
1473      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1474 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1475 if(torch::lazy::symbolicShapeEnabled()){
1476 std::vector<torch::jit::IValue> inputs = { self, dim, dtype };
1477 const char* schema_str = "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor";
1478 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1479 }
1480
1481 node = torch::lazy::MakeNode<Cumsum>(lazy_self->GetIrValue(), dim, dtype, std::move(shapes));
1482 CacheNode(node);
1483 }
1484
1485 auto result = torch::lazy::CreateAtenFromLtcTensor(
1486 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1487 return result;
1488 }
1489
1490
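    // The *_copy variants of view ops (detach_copy, diagonal_copy, expand_copy,
    // ...) borrow their shape inference from the
    // CompositeExplicitAutogradNonFunctional kernels run on meta tensors.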
1491 at::Tensor LazyNativeFunctions::detach_copy(const at::Tensor & self) {
1492
1493 if (force_eager_fallback(at::aten::detach_copy)) {
1494 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(detach_copy)>::call(
1495 self
1496 );
1497 }
1498
1499 TORCH_LAZY_FN_COUNTER("lazy::");
1500 auto common_device = torch::lazy::GetBackendDevice(self);
1501 TORCH_INTERNAL_ASSERT(common_device);
1502
1503 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1504 torch::lazy::NodePtr node = torch::lazy::ReuseNode<DetachCopy>(lazy_self->GetIrValue());
1505 if (!node) {
1506 auto self_meta = to_meta(self);
1507 auto out_meta = at::compositeexplicitautogradnonfunctional::detach_copy(self_meta);
1508
1509      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1510 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1511 if(torch::lazy::symbolicShapeEnabled()){
1512 std::vector<torch::jit::IValue> inputs = { self };
1513 const char* schema_str = "aten::detach_copy(Tensor self) -> Tensor";
1514 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1515 }
1516
1517 node = torch::lazy::MakeNode<DetachCopy>(lazy_self->GetIrValue(), std::move(shapes));
1518 CacheNode(node);
1519 }
1520
1521 auto result = torch::lazy::CreateAtenFromLtcTensor(
1522 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1523 return result;
1524 }
1525
1526
1527 at::Tensor LazyNativeFunctions::diagonal_copy(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
1528
1529 if (force_eager_fallback(at::aten::diagonal_copy)) {
1530 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(diagonal_copy)>::call(
1531 self,
1532 offset,
1533 dim1,
1534 dim2
1535 );
1536 }
1537
1538 TORCH_LAZY_FN_COUNTER("lazy::");
1539 auto common_device = torch::lazy::GetBackendDevice(self);
1540 TORCH_INTERNAL_ASSERT(common_device);
1541
1542 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1543 torch::lazy::NodePtr node = torch::lazy::ReuseNode<DiagonalCopy>(lazy_self->GetIrValue(), offset, dim1, dim2);
1544 if (!node) {
1545 auto self_meta = to_meta(self);
1546 auto out_meta = at::compositeexplicitautogradnonfunctional::diagonal_copy(self_meta, offset, dim1, dim2);
1547
1548      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1549 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1550 if(torch::lazy::symbolicShapeEnabled()){
1551 std::vector<torch::jit::IValue> inputs = { self, offset, dim1, dim2 };
1552 const char* schema_str = "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor";
1553 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1554 }
1555
1556 node = torch::lazy::MakeNode<DiagonalCopy>(lazy_self->GetIrValue(), offset, dim1, dim2, std::move(shapes));
1557 CacheNode(node);
1558 }
1559
1560 auto result = torch::lazy::CreateAtenFromLtcTensor(
1561 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1562 return result;
1563 }
1564
1565
1566 at::Tensor LazyNativeFunctions::diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
1567
1568 if (force_eager_fallback(at::aten::diagonal_scatter)) {
1569 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(diagonal_scatter)>::call(
1570 self,
1571 src,
1572 offset,
1573 dim1,
1574 dim2
1575 );
1576 }
1577
1578 TORCH_LAZY_FN_COUNTER("lazy::");
1579 auto common_device = torch::lazy::GetBackendDevice(self, src);
1580 TORCH_INTERNAL_ASSERT(common_device);
1581
1582 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1583 LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *common_device);
1584 torch::lazy::NodePtr node = torch::lazy::ReuseNode<DiagonalScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), offset, dim1, dim2);
1585 if (!node) {
1586
1587 auto shapes = torch::lazy::compute_shape_diagonal_scatter(self, src, offset, dim1, dim2);
1588 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1589 if(torch::lazy::symbolicShapeEnabled()){
1590 std::vector<torch::jit::IValue> inputs = { self, src, offset, dim1, dim2 };
1591 const char* schema_str = "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor";
1592 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1593 }
1594
1595 node = torch::lazy::MakeNode<DiagonalScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), offset, dim1, dim2, std::move(shapes));
1596 CacheNode(node);
1597 }
1598
1599 auto result = torch::lazy::CreateAtenFromLtcTensor(
1600 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1601 return result;
1602 }
1603
1604
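    // For overloaded ops, ATEN_OP2 names the exact overload being forwarded to
    // the eager fallback: div.Tensor here and div.Tensor_mode just below.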
1605 at::Tensor LazyNativeFunctions::div(const at::Tensor & self, const at::Tensor & other) {
1606
1607 if (force_eager_fallback(at::aten::div)) {
1608 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(div, Tensor)>::call(
1609 self,
1610 other
1611 );
1612 }
1613
1614 TORCH_LAZY_FN_COUNTER("lazy::");
1615 auto common_device = torch::lazy::GetBackendDevice(self, other);
1616 TORCH_INTERNAL_ASSERT(common_device);
1617
1618 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1619 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
1620 torch::lazy::NodePtr node = torch::lazy::ReuseNode<DivTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
1621 if (!node) {
1622 auto self_meta = to_meta(self);
1623 auto other_meta = to_meta(other);
1624 auto out_meta = at::meta::div(self_meta, other_meta);
1625
1626      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1627 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1628 if(torch::lazy::symbolicShapeEnabled()){
1629 std::vector<torch::jit::IValue> inputs = { self, other };
1630 const char* schema_str = "aten::div.Tensor(Tensor self, Tensor other) -> Tensor";
1631 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1632 }
1633
1634 node = torch::lazy::MakeNode<DivTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
1635 CacheNode(node);
1636 }
1637
1638 auto result = torch::lazy::CreateAtenFromLtcTensor(
1639 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1640 return result;
1641 }
1642
1643
1644 at::Tensor LazyNativeFunctions::div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
1645
1646 if (force_eager_fallback(at::aten::div)) {
1647 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(div, Tensor_mode)>::call(
1648 self,
1649 other,
1650 rounding_mode
1651 );
1652 }
1653
1654 TORCH_LAZY_FN_COUNTER("lazy::");
1655 auto common_device = torch::lazy::GetBackendDevice(self, other);
1656 TORCH_INTERNAL_ASSERT(common_device);
1657
1658 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1659 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
1660 torch::lazy::NodePtr node = torch::lazy::ReuseNode<DivTensorMode>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), rounding_mode);
1661 if (!node) {
1662 auto self_meta = to_meta(self);
1663 auto other_meta = to_meta(other);
1664 auto out_meta = at::meta::div(self_meta, other_meta, rounding_mode);
1665
1666      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1667 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1668 if(torch::lazy::symbolicShapeEnabled()){
1669 std::vector<torch::jit::IValue> inputs = { self, other, rounding_mode };
1670 const char* schema_str = "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor";
1671 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1672 }
1673
1674 node = torch::lazy::MakeNode<DivTensorMode>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), rounding_mode, std::move(shapes));
1675 CacheNode(node);
1676 }
1677
1678 auto result = torch::lazy::CreateAtenFromLtcTensor(
1679 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1680 return result;
1681 }
1682
1683
1684 at::Tensor LazyNativeFunctions::elu(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
1685
1686 if (force_eager_fallback(at::aten::elu)) {
1687 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(elu)>::call(
1688 self,
1689 alpha,
1690 scale,
1691 input_scale
1692 );
1693 }
1694
1695 TORCH_LAZY_FN_COUNTER("lazy::");
1696 auto common_device = torch::lazy::GetBackendDevice(self);
1697 TORCH_INTERNAL_ASSERT(common_device);
1698
1699 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1700 auto node_alpha = torch::lazy::LazyGraphExecutor::Get()->
1701 GetIrValueForScalarFromCodegen(alpha, *common_device);
1702 auto node_scale = torch::lazy::LazyGraphExecutor::Get()->
1703 GetIrValueForScalarFromCodegen(scale, *common_device);
1704 auto node_input_scale = torch::lazy::LazyGraphExecutor::Get()->
1705 GetIrValueForScalarFromCodegen(input_scale, *common_device);
1706 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Elu>(lazy_self->GetIrValue(), node_alpha, node_scale, node_input_scale);
1707 if (!node) {
1708 auto self_meta = to_meta(self);
1709 auto out_meta = at::meta::elu(self_meta, alpha, scale, input_scale);
1710
1711      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1712 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1713 if(torch::lazy::symbolicShapeEnabled()){
1714 std::vector<torch::jit::IValue> inputs = { self, alpha, scale, input_scale };
1715 const char* schema_str = "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor";
1716 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1717 }
1718
1719 node = torch::lazy::MakeNode<Elu>(lazy_self->GetIrValue(), node_alpha, node_scale, node_input_scale, std::move(shapes));
1720 CacheNode(node);
1721 }
1722
1723 auto result = torch::lazy::CreateAtenFromLtcTensor(
1724 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1725 return result;
1726 }
1727
1728
1729 at::Tensor LazyNativeFunctions::elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
1730
1731 if (force_eager_fallback(at::aten::elu_backward)) {
1732 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(elu_backward)>::call(
1733 grad_output,
1734 alpha,
1735 scale,
1736 input_scale,
1737 is_result,
1738 self_or_result
1739 );
1740 }
1741
1742 TORCH_LAZY_FN_COUNTER("lazy::");
1743 auto common_device = torch::lazy::GetBackendDevice(grad_output, self_or_result);
1744 TORCH_INTERNAL_ASSERT(common_device);
1745
1746 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
1747 auto node_alpha = torch::lazy::LazyGraphExecutor::Get()->
1748 GetIrValueForScalarFromCodegen(alpha, *common_device);
1749 auto node_scale = torch::lazy::LazyGraphExecutor::Get()->
1750 GetIrValueForScalarFromCodegen(scale, *common_device);
1751 auto node_input_scale = torch::lazy::LazyGraphExecutor::Get()->
1752 GetIrValueForScalarFromCodegen(input_scale, *common_device);
1753 LazyTensorPtr lazy_self_or_result = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self_or_result, *common_device);
1754 torch::lazy::NodePtr node = torch::lazy::ReuseNode<EluBackward>(lazy_grad_output->GetIrValue(), node_alpha, node_scale, node_input_scale, is_result, lazy_self_or_result->GetIrValue());
1755 if (!node) {
1756 auto grad_output_meta = to_meta(grad_output);
1757 auto self_or_result_meta = to_meta(self_or_result);
1758 auto out_meta = at::meta::elu_backward(grad_output_meta, alpha, scale, input_scale, is_result, self_or_result_meta);
1759
1760      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1761 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1762 if(torch::lazy::symbolicShapeEnabled()){
1763 std::vector<torch::jit::IValue> inputs = { grad_output, alpha, scale, input_scale, is_result, self_or_result };
1764 const char* schema_str = "aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor";
1765 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1766 }
1767
1768 node = torch::lazy::MakeNode<EluBackward>(lazy_grad_output->GetIrValue(), node_alpha, node_scale, node_input_scale, is_result, lazy_self_or_result->GetIrValue(), std::move(shapes));
1769 CacheNode(node);
1770 }
1771
1772 auto result = torch::lazy::CreateAtenFromLtcTensor(
1773 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1774 return result;
1775 }
1776
1777
1778 at::Tensor LazyNativeFunctions::embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
1779
1780 if (force_eager_fallback(at::aten::embedding)) {
1781 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(embedding)>::call(
1782 weight,
1783 indices,
1784 padding_idx,
1785 scale_grad_by_freq,
1786 sparse
1787 );
1788 }
1789
1790 TORCH_LAZY_FN_COUNTER("lazy::");
1791 auto common_device = torch::lazy::GetBackendDevice(weight, indices);
1792 TORCH_INTERNAL_ASSERT(common_device);
1793
1794 LazyTensorPtr lazy_weight = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(weight, *common_device);
1795 LazyTensorPtr lazy_indices = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(indices, *common_device);
1796 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Embedding>(lazy_weight->GetIrValue(), lazy_indices->GetIrValue(), padding_idx, scale_grad_by_freq, sparse);
1797 if (!node) {
1798
1799 auto shapes = torch::lazy::compute_shape_embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse);
1800 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1801 if(torch::lazy::symbolicShapeEnabled()){
1802 std::vector<torch::jit::IValue> inputs = { weight, indices, padding_idx, scale_grad_by_freq, sparse };
1803 const char* schema_str = "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor";
1804 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1805 }
1806
1807 node = torch::lazy::MakeNode<Embedding>(lazy_weight->GetIrValue(), lazy_indices->GetIrValue(), padding_idx, scale_grad_by_freq, sparse, std::move(shapes));
1808 CacheNode(node);
1809 }
1810
1811 auto result = torch::lazy::CreateAtenFromLtcTensor(
1812 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1813 return result;
1814 }
1815
1816
1817 at::Tensor LazyNativeFunctions::embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
1818
1819 if (force_eager_fallback(at::aten::embedding_dense_backward)) {
1820 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(embedding_dense_backward)>::call(
1821 grad_output,
1822 indices,
1823 num_weights,
1824 padding_idx,
1825 scale_grad_by_freq
1826 );
1827 }
1828
1829 TORCH_LAZY_FN_COUNTER("lazy::");
1830 auto common_device = torch::lazy::GetBackendDevice(grad_output, indices);
1831 TORCH_INTERNAL_ASSERT(common_device);
1832
1833 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
1834 LazyTensorPtr lazy_indices = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(indices, *common_device);
1835 torch::lazy::NodePtr node = torch::lazy::ReuseNode<EmbeddingDenseBackward>(lazy_grad_output->GetIrValue(), lazy_indices->GetIrValue(), num_weights, padding_idx, scale_grad_by_freq);
1836 if (!node) {
1837
1838 auto shapes = torch::lazy::compute_shape_embedding_dense_backward(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
1839 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1840 if(torch::lazy::symbolicShapeEnabled()){
1841 std::vector<torch::jit::IValue> inputs = { grad_output, indices, num_weights, padding_idx, scale_grad_by_freq };
1842 const char* schema_str = "aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor";
1843 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1844 }
1845
1846 node = torch::lazy::MakeNode<EmbeddingDenseBackward>(lazy_grad_output->GetIrValue(), lazy_indices->GetIrValue(), num_weights, padding_idx, scale_grad_by_freq, std::move(shapes));
1847 CacheNode(node);
1848 }
1849
1850 auto result = torch::lazy::CreateAtenFromLtcTensor(
1851 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1852 return result;
1853 }
1854
1855
1856 at::Tensor LazyNativeFunctions::eq(const at::Tensor & self, const at::Scalar & other) {
1857
1858 if (force_eager_fallback(at::aten::eq)) {
1859 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(eq, Scalar)>::call(
1860 self,
1861 other
1862 );
1863 }
1864
1865 TORCH_LAZY_FN_COUNTER("lazy::");
1866 auto common_device = torch::lazy::GetBackendDevice(self);
1867 TORCH_INTERNAL_ASSERT(common_device);
1868
1869 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1870 auto node_other = torch::lazy::LazyGraphExecutor::Get()->
1871 GetIrValueForScalarFromCodegen(other, *common_device);
1872 torch::lazy::NodePtr node = torch::lazy::ReuseNode<EqScalar>(lazy_self->GetIrValue(), node_other);
1873 if (!node) {
1874 auto self_meta = to_meta(self);
1875 auto out_meta = at::meta::eq(self_meta, other);
1876
1877      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1878 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1879 if(torch::lazy::symbolicShapeEnabled()){
1880 std::vector<torch::jit::IValue> inputs = { self, other };
1881 const char* schema_str = "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor";
1882 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1883 }
1884
1885 node = torch::lazy::MakeNode<EqScalar>(lazy_self->GetIrValue(), node_other, std::move(shapes));
1886 CacheNode(node);
1887 }
1888
1889 auto result = torch::lazy::CreateAtenFromLtcTensor(
1890 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1891 return result;
1892 }
1893
1894
1895 at::Tensor LazyNativeFunctions::eq(const at::Tensor & self, const at::Tensor & other) {
1896
1897 if (force_eager_fallback(at::aten::eq)) {
1898 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(eq, Tensor)>::call(
1899 self,
1900 other
1901 );
1902 }
1903
1904 TORCH_LAZY_FN_COUNTER("lazy::");
1905 auto common_device = torch::lazy::GetBackendDevice(self, other);
1906 TORCH_INTERNAL_ASSERT(common_device);
1907
1908 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1909 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
1910 torch::lazy::NodePtr node = torch::lazy::ReuseNode<EqTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
1911 if (!node) {
1912 auto self_meta = to_meta(self);
1913 auto other_meta = to_meta(other);
1914 auto out_meta = at::meta::eq(self_meta, other_meta);
1915
1916      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1917 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1918 if(torch::lazy::symbolicShapeEnabled()){
1919 std::vector<torch::jit::IValue> inputs = { self, other };
1920 const char* schema_str = "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor";
1921 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1922 }
1923
1924 node = torch::lazy::MakeNode<EqTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
1925 CacheNode(node);
1926 }
1927
1928 auto result = torch::lazy::CreateAtenFromLtcTensor(
1929 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1930 return result;
1931 }
1932
1933
1934 at::Tensor LazyNativeFunctions::exp(const at::Tensor & self) {
1935
1936 if (force_eager_fallback(at::aten::exp)) {
1937 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(exp)>::call(
1938 self
1939 );
1940 }
1941
1942 TORCH_LAZY_FN_COUNTER("lazy::");
1943 auto common_device = torch::lazy::GetBackendDevice(self);
1944 TORCH_INTERNAL_ASSERT(common_device);
1945
1946 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1947 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Exp>(lazy_self->GetIrValue());
1948 if (!node) {
1949 auto self_meta = to_meta(self);
1950 auto out_meta = at::meta::exp(self_meta);
1951
1952      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1953 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1954 if(torch::lazy::symbolicShapeEnabled()){
1955 std::vector<torch::jit::IValue> inputs = { self };
1956 const char* schema_str = "aten::exp(Tensor self) -> Tensor";
1957 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1958 }
1959
1960 node = torch::lazy::MakeNode<Exp>(lazy_self->GetIrValue(), std::move(shapes));
1961 CacheNode(node);
1962 }
1963
1964 auto result = torch::lazy::CreateAtenFromLtcTensor(
1965 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
1966 return result;
1967 }
1968
1969
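    // SymInt ops keep the symint calling convention: `size` may carry symbolic
    // values, and GetSymIntArrayRefValue converts it into the representation the
    // ExpandCopy node stores.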
1970 at::Tensor LazyNativeFunctions::expand_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
1971
1972 if (force_eager_fallback(at::aten::expand_copy)) {
1973 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(expand_copy)>::call(
1974 self,
1975 size,
1976 implicit
1977 );
1978 }
1979
1980 TORCH_LAZY_FN_COUNTER("lazy::");
1981 auto common_device = torch::lazy::GetBackendDevice(self);
1982 TORCH_INTERNAL_ASSERT(common_device);
1983
1984 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
1985 torch::lazy::NodePtr node = torch::lazy::ReuseNode<ExpandCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), implicit);
1986 if (!node) {
1987 auto self_meta = to_meta(self);
1988 auto out_meta = at::compositeexplicitautogradnonfunctional::expand_copy_symint(self_meta, size, implicit);
1989
1990      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
1991 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
1992 if(torch::lazy::symbolicShapeEnabled()){
1993 std::vector<torch::jit::IValue> inputs = { self, size, implicit };
1994 const char* schema_str = "aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor";
1995 applySymbolicShapesOnLT(schema_str, inputs, shapes);
1996 }
1997
1998 node = torch::lazy::MakeNode<ExpandCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), implicit, std::move(shapes));
1999 CacheNode(node);
2000 }
2001
2002 auto result = torch::lazy::CreateAtenFromLtcTensor(
2003 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2004 return result;
2005 }
2006
2007
2008 at::Tensor LazyNativeFunctions::flip(const at::Tensor & self, at::IntArrayRef dims) {
2009
2010 if (force_eager_fallback(at::aten::flip)) {
2011 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(flip)>::call(
2012 self,
2013 dims
2014 );
2015 }
2016
2017 TORCH_LAZY_FN_COUNTER("lazy::");
2018 auto common_device = torch::lazy::GetBackendDevice(self);
2019 TORCH_INTERNAL_ASSERT(common_device);
2020
2021 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2022 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Flip>(lazy_self->GetIrValue(), std::vector<int64_t>(dims.begin(), dims.end()));
2023 if (!node) {
2024
2025 auto shapes = torch::lazy::compute_shape_flip(self, dims);
2026 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2027 if(torch::lazy::symbolicShapeEnabled()){
2028 std::vector<torch::jit::IValue> inputs = { self, dims };
2029 const char* schema_str = "aten::flip(Tensor self, int[] dims) -> Tensor";
2030 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2031 }
2032
2033 node = torch::lazy::MakeNode<Flip>(lazy_self->GetIrValue(), std::vector<int64_t>(dims.begin(), dims.end()), std::move(shapes));
2034 CacheNode(node);
2035 }
2036
2037 auto result = torch::lazy::CreateAtenFromLtcTensor(
2038 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2039 return result;
2040 }
2041
2042
2043 at::Tensor LazyNativeFunctions::floor(const at::Tensor & self) {
2044
2045 if (force_eager_fallback(at::aten::floor)) {
2046 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(floor)>::call(
2047 self
2048 );
2049 }
2050
2051 TORCH_LAZY_FN_COUNTER("lazy::");
2052 auto common_device = torch::lazy::GetBackendDevice(self);
2053 TORCH_INTERNAL_ASSERT(common_device);
2054
2055 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2056 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Floor>(lazy_self->GetIrValue());
2057 if (!node) {
2058 auto self_meta = to_meta(self);
2059 auto out_meta = at::meta::floor(self_meta);
2060
2061      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2062 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2063 if(torch::lazy::symbolicShapeEnabled()){
2064 std::vector<torch::jit::IValue> inputs = { self };
2065 const char* schema_str = "aten::floor(Tensor self) -> Tensor";
2066 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2067 }
2068
2069 node = torch::lazy::MakeNode<Floor>(lazy_self->GetIrValue(), std::move(shapes));
2070 CacheNode(node);
2071 }
2072
2073 auto result = torch::lazy::CreateAtenFromLtcTensor(
2074 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2075 return result;
2076 }
2077
2078
2079 at::Tensor LazyNativeFunctions::frac(const at::Tensor & self) {
2080
2081 if (force_eager_fallback(at::aten::frac)) {
2082 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(frac)>::call(
2083 self
2084 );
2085 }
2086
2087 TORCH_LAZY_FN_COUNTER("lazy::");
2088 auto common_device = torch::lazy::GetBackendDevice(self);
2089 TORCH_INTERNAL_ASSERT(common_device);
2090
2091 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2092 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Frac>(lazy_self->GetIrValue());
2093 if (!node) {
2094 auto self_meta = to_meta(self);
2095 auto out_meta = at::meta::frac(self_meta);
2096
2097      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2098 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2099 if(torch::lazy::symbolicShapeEnabled()){
2100 std::vector<torch::jit::IValue> inputs = { self };
2101 const char* schema_str = "aten::frac(Tensor self) -> Tensor";
2102 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2103 }
2104
2105 node = torch::lazy::MakeNode<Frac>(lazy_self->GetIrValue(), std::move(shapes));
2106 CacheNode(node);
2107 }
2108
2109 auto result = torch::lazy::CreateAtenFromLtcTensor(
2110 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2111 return result;
2112 }
2113
2114
2115 at::Tensor LazyNativeFunctions::gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
2116
2117 if (force_eager_fallback(at::aten::gather)) {
2118 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(gather)>::call(
2119 self,
2120 dim,
2121 index,
2122 sparse_grad
2123 );
2124 }
2125
2126 TORCH_LAZY_FN_COUNTER("lazy::");
2127 auto common_device = torch::lazy::GetBackendDevice(self, index);
2128 TORCH_INTERNAL_ASSERT(common_device);
2129
2130 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2131 LazyTensorPtr lazy_index = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(index, *common_device);
2132 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Gather>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), sparse_grad);
2133 if (!node) {
2134 auto self_meta = to_meta(self);
2135 auto index_meta = to_meta(index);
2136 auto out_meta = at::meta::gather(self_meta, dim, index_meta, sparse_grad);
2137
2138      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2139 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2140 if(torch::lazy::symbolicShapeEnabled()){
2141 std::vector<torch::jit::IValue> inputs = { self, dim, index, sparse_grad };
2142 const char* schema_str = "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor";
2143 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2144 }
2145
2146 node = torch::lazy::MakeNode<Gather>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), sparse_grad, std::move(shapes));
2147 CacheNode(node);
2148 }
2149
2150 auto result = torch::lazy::CreateAtenFromLtcTensor(
2151 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2152 return result;
2153 }
2154
2155
2156 at::Tensor LazyNativeFunctions::ge(const at::Tensor & self, const at::Scalar & other) {
2157
2158 if (force_eager_fallback(at::aten::ge)) {
2159 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(ge, Scalar)>::call(
2160 self,
2161 other
2162 );
2163 }
2164
2165 TORCH_LAZY_FN_COUNTER("lazy::");
2166 auto common_device = torch::lazy::GetBackendDevice(self);
2167 TORCH_INTERNAL_ASSERT(common_device);
2168
2169 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2170 auto node_other = torch::lazy::LazyGraphExecutor::Get()->
2171 GetIrValueForScalarFromCodegen(other, *common_device);
2172 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GeScalar>(lazy_self->GetIrValue(), node_other);
2173 if (!node) {
2174 auto self_meta = to_meta(self);
2175 auto out_meta = at::meta::ge(self_meta, other);
2176
2177      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2178 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2179 if(torch::lazy::symbolicShapeEnabled()){
2180 std::vector<torch::jit::IValue> inputs = { self, other };
2181 const char* schema_str = "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor";
2182 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2183 }
2184
2185 node = torch::lazy::MakeNode<GeScalar>(lazy_self->GetIrValue(), node_other, std::move(shapes));
2186 CacheNode(node);
2187 }
2188
2189 auto result = torch::lazy::CreateAtenFromLtcTensor(
2190 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2191 return result;
2192 }
2193
2194
2195 at::Tensor LazyNativeFunctions::ge(const at::Tensor & self, const at::Tensor & other) {
2196
2197 if (force_eager_fallback(at::aten::ge)) {
2198 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(ge, Tensor)>::call(
2199 self,
2200 other
2201 );
2202 }
2203
2204 TORCH_LAZY_FN_COUNTER("lazy::");
2205 auto common_device = torch::lazy::GetBackendDevice(self, other);
2206 TORCH_INTERNAL_ASSERT(common_device);
2207
2208 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2209 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
2210 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GeTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
2211 if (!node) {
2212 auto self_meta = to_meta(self);
2213 auto other_meta = to_meta(other);
2214 auto out_meta = at::meta::ge(self_meta, other_meta);
2215
2216      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2217 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2218 if(torch::lazy::symbolicShapeEnabled()){
2219 std::vector<torch::jit::IValue> inputs = { self, other };
2220 const char* schema_str = "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor";
2221 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2222 }
2223
2224 node = torch::lazy::MakeNode<GeTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
2225 CacheNode(node);
2226 }
2227
2228 auto result = torch::lazy::CreateAtenFromLtcTensor(
2229 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2230 return result;
2231 }
2232
2233
2234 at::Tensor LazyNativeFunctions::gelu(const at::Tensor & self, c10::string_view approximate) {
2235
2236 if (force_eager_fallback(at::aten::gelu)) {
2237 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(gelu)>::call(
2238 self,
2239 approximate
2240 );
2241 }
2242
2243 TORCH_LAZY_FN_COUNTER("lazy::");
2244 auto common_device = torch::lazy::GetBackendDevice(self);
2245 TORCH_INTERNAL_ASSERT(common_device);
2246
2247 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2248 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Gelu>(lazy_self->GetIrValue(), approximate);
2249 if (!node) {
2250 auto self_meta = to_meta(self);
2251 auto out_meta = at::meta::gelu(self_meta, approximate);
2252
2253      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2254 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2255 if(torch::lazy::symbolicShapeEnabled()){
2256 std::vector<torch::jit::IValue> inputs = { self, approximate };
2257 const char* schema_str = "aten::gelu(Tensor self, *, str approximate='none') -> Tensor";
2258 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2259 }
2260
2261 node = torch::lazy::MakeNode<Gelu>(lazy_self->GetIrValue(), approximate, std::move(shapes));
2262 CacheNode(node);
2263 }
2264
2265 auto result = torch::lazy::CreateAtenFromLtcTensor(
2266 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2267 return result;
2268 }
2269
2270
2271 at::Tensor LazyNativeFunctions::gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
2272
2273 if (force_eager_fallback(at::aten::gelu_backward)) {
2274 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(gelu_backward)>::call(
2275 grad_output,
2276 self,
2277 approximate
2278 );
2279 }
2280
2281 TORCH_LAZY_FN_COUNTER("lazy::");
2282 auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
2283 TORCH_INTERNAL_ASSERT(common_device);
2284
2285 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
2286 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2287 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GeluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), approximate);
2288 if (!node) {
2289 auto grad_output_meta = to_meta(grad_output);
2290 auto self_meta = to_meta(self);
2291 auto out_meta = at::meta::gelu_backward(grad_output_meta, self_meta, approximate);
2292
2293      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2294 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2295 if(torch::lazy::symbolicShapeEnabled()){
2296 std::vector<torch::jit::IValue> inputs = { grad_output, self, approximate };
2297 const char* schema_str = "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor";
2298 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2299 }
2300
2301 node = torch::lazy::MakeNode<GeluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), approximate, std::move(shapes));
2302 CacheNode(node);
2303 }
2304
2305 auto result = torch::lazy::CreateAtenFromLtcTensor(
2306 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2307 return result;
2308 }
2309
2310
2311 at::Tensor LazyNativeFunctions::glu(const at::Tensor & self, int64_t dim) {
2312
2313 if (force_eager_fallback(at::aten::glu)) {
2314 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(glu)>::call(
2315 self,
2316 dim
2317 );
2318 }
2319
2320 TORCH_LAZY_FN_COUNTER("lazy::");
2321 auto common_device = torch::lazy::GetBackendDevice(self);
2322 TORCH_INTERNAL_ASSERT(common_device);
2323
2324 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2325 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Glu>(lazy_self->GetIrValue(), dim);
2326 if (!node) {
2327 auto self_meta = to_meta(self);
2328 auto out_meta = at::meta::glu(self_meta, dim);
2329
2330      std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2331 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2332 if(torch::lazy::symbolicShapeEnabled()){
2333 std::vector<torch::jit::IValue> inputs = { self, dim };
2334 const char* schema_str = "aten::glu(Tensor self, int dim=-1) -> Tensor";
2335 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2336 }
2337
2338 node = torch::lazy::MakeNode<Glu>(lazy_self->GetIrValue(), dim, std::move(shapes));
2339 CacheNode(node);
2340 }
2341
2342 auto result = torch::lazy::CreateAtenFromLtcTensor(
2343 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2344 return result;
2345 }
2346
2347
2348 at::Tensor LazyNativeFunctions::glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
2349
2350 if (force_eager_fallback(at::aten::glu_backward)) {
2351 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(glu_backward)>::call(
2352 grad_output,
2353 self,
2354 dim
2355 );
2356 }
2357
2358 TORCH_LAZY_FN_COUNTER("lazy::");
2359 auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
2360 TORCH_INTERNAL_ASSERT(common_device);
2361
2362 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
2363 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2364 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), dim);
2365 if (!node) {
2366
2367 auto shapes = torch::lazy::compute_shape_glu_backward(grad_output, self, dim);
2368 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2369 if(torch::lazy::symbolicShapeEnabled()){
2370 std::vector<torch::jit::IValue> inputs = { grad_output, self, dim };
2371 const char* schema_str = "aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor";
2372 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2373 }
2374
2375 node = torch::lazy::MakeNode<GluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), dim, std::move(shapes));
2376 CacheNode(node);
2377 }
2378
2379 auto result = torch::lazy::CreateAtenFromLtcTensor(
2380 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2381 return result;
2382 }
2383
2384
2385 at::Tensor LazyNativeFunctions::glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
2386
2387 if (force_eager_fallback(at::aten::glu_jvp)) {
2388 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(glu_jvp)>::call(
2389 glu,
2390 x,
2391 dx,
2392 dim
2393 );
2394 }
2395
2396 TORCH_LAZY_FN_COUNTER("lazy::");
2397 auto common_device = torch::lazy::GetBackendDevice(glu, x, dx);
2398 TORCH_INTERNAL_ASSERT(common_device);
2399
2400 LazyTensorPtr lazy_glu = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(glu, *common_device);
2401 LazyTensorPtr lazy_x = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(x, *common_device);
2402 LazyTensorPtr lazy_dx = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(dx, *common_device);
2403 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GluJvp>(lazy_glu->GetIrValue(), lazy_x->GetIrValue(), lazy_dx->GetIrValue(), dim);
2404 if (!node) {
2405
2406 auto shapes = torch::lazy::compute_shape_glu_jvp(glu, x, dx, dim);
2407 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2408 if(torch::lazy::symbolicShapeEnabled()){
2409 std::vector<torch::jit::IValue> inputs = { glu, x, dx, dim };
2410 const char* schema_str = "aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor";
2411 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2412 }
2413
2414 node = torch::lazy::MakeNode<GluJvp>(lazy_glu->GetIrValue(), lazy_x->GetIrValue(), lazy_dx->GetIrValue(), dim, std::move(shapes));
2415 CacheNode(node);
2416 }
2417
2418 auto result = torch::lazy::CreateAtenFromLtcTensor(
2419 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2420 return result;
2421 }
2422
2423
2424 at::Tensor LazyNativeFunctions::grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
2425
2426 if (force_eager_fallback(at::aten::grid_sampler_2d)) {
2427 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(grid_sampler_2d)>::call(
2428 input,
2429 grid,
2430 interpolation_mode,
2431 padding_mode,
2432 align_corners
2433 );
2434 }
2435
2436 TORCH_LAZY_FN_COUNTER("lazy::");
2437 auto common_device = torch::lazy::GetBackendDevice(input, grid);
2438 TORCH_INTERNAL_ASSERT(common_device);
2439
2440 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
2441 LazyTensorPtr lazy_grid = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grid, *common_device);
2442 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GridSampler2d>(lazy_input->GetIrValue(), lazy_grid->GetIrValue(), interpolation_mode, padding_mode, align_corners);
2443 if (!node) {
2444
2445 auto shapes = torch::lazy::compute_shape_grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners);
2446 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2447 if(torch::lazy::symbolicShapeEnabled()){
2448 std::vector<torch::jit::IValue> inputs = { input, grid, interpolation_mode, padding_mode, align_corners };
2449 const char* schema_str = "aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor";
2450 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2451 }
2452
2453 node = torch::lazy::MakeNode<GridSampler2d>(lazy_input->GetIrValue(), lazy_grid->GetIrValue(), interpolation_mode, padding_mode, align_corners, std::move(shapes));
2454 CacheNode(node);
2455 }
2456
2457 auto result = torch::lazy::CreateAtenFromLtcTensor(
2458 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2459 return result;
2460 }
2461
2462
2463 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
2464
2465 if (force_eager_fallback(at::aten::grid_sampler_2d_backward)) {
2466 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(grid_sampler_2d_backward)>::call(
2467 grad_output,
2468 input,
2469 grid,
2470 interpolation_mode,
2471 padding_mode,
2472 align_corners,
2473 output_mask
2474 );
2475 }
2476
2477 TORCH_LAZY_FN_COUNTER("lazy::");
2478 auto common_device = torch::lazy::GetBackendDevice(grad_output, input, grid);
2479 TORCH_INTERNAL_ASSERT(common_device);
2480
2481 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
2482 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
2483 LazyTensorPtr lazy_grid = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grid, *common_device);
2484 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GridSampler2dBackward>(lazy_grad_output->GetIrValue(), lazy_input->GetIrValue(), lazy_grid->GetIrValue(), interpolation_mode, padding_mode, align_corners, std::vector<bool>(output_mask.begin(), output_mask.end()));
2485 if (!node) {
2486
2487 auto shapes = torch::lazy::compute_shape_grid_sampler_2d_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
2488 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
2489 if(torch::lazy::symbolicShapeEnabled()){
2490 std::vector<torch::jit::IValue> inputs = { grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask };
2491 const char* schema_str = "aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)";
2492 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2493 }
2494
2495 node = torch::lazy::MakeNode<GridSampler2dBackward>(lazy_grad_output->GetIrValue(), lazy_input->GetIrValue(), lazy_grid->GetIrValue(), interpolation_mode, padding_mode, align_corners, std::vector<bool>(output_mask.begin(), output_mask.end()), std::move(shapes));
2496 CacheNode(node);
2497 }
2498
2499 std::vector<LazyTensorPtr> lazy_tensors;
2500 for (int i = 0; i < 2; i++) {
2501 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
2502 }
2503 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
2504 return result;
2505 }
2506
2507
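    // Scalar overload: `other` is lowered to an IR value with
    // GetIrValueForScalarFromCodegen rather than wrapped as a lazy tensor, and the
    // output dtype/shape come from running the meta kernel (at::meta::gt) on a
    // meta copy of `self`.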
2508 at::Tensor LazyNativeFunctions::gt(const at::Tensor & self, const at::Scalar & other) {
2509
2510 if (force_eager_fallback(at::aten::gt)) {
2511 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(gt, Scalar)>::call(
2512 self,
2513 other
2514 );
2515 }
2516
2517 TORCH_LAZY_FN_COUNTER("lazy::");
2518 auto common_device = torch::lazy::GetBackendDevice(self);
2519 TORCH_INTERNAL_ASSERT(common_device);
2520
2521 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2522 auto node_other = torch::lazy::LazyGraphExecutor::Get()->
2523 GetIrValueForScalarFromCodegen(other, *common_device);
2524 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GtScalar>(lazy_self->GetIrValue(), node_other);
2525 if (!node) {
2526 auto self_meta = to_meta(self);
2527 auto out_meta = at::meta::gt(self_meta, other);
2528
2529            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2530 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2531 if(torch::lazy::symbolicShapeEnabled()){
2532 std::vector<torch::jit::IValue> inputs = { self, other };
2533 const char* schema_str = "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor";
2534 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2535 }
2536
2537 node = torch::lazy::MakeNode<GtScalar>(lazy_self->GetIrValue(), node_other, std::move(shapes));
2538 CacheNode(node);
2539 }
2540
2541 auto result = torch::lazy::CreateAtenFromLtcTensor(
2542 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2543 return result;
2544 }
2545
2546
2547 at::Tensor LazyNativeFunctions::gt(const at::Tensor & self, const at::Tensor & other) {
2548
2549 if (force_eager_fallback(at::aten::gt)) {
2550 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(gt, Tensor)>::call(
2551 self,
2552 other
2553 );
2554 }
2555
2556 TORCH_LAZY_FN_COUNTER("lazy::");
2557 auto common_device = torch::lazy::GetBackendDevice(self, other);
2558 TORCH_INTERNAL_ASSERT(common_device);
2559
2560 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2561 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
2562 torch::lazy::NodePtr node = torch::lazy::ReuseNode<GtTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
2563 if (!node) {
2564 auto self_meta = to_meta(self);
2565 auto other_meta = to_meta(other);
2566 auto out_meta = at::meta::gt(self_meta, other_meta);
2567
2568            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2569 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2570 if(torch::lazy::symbolicShapeEnabled()){
2571 std::vector<torch::jit::IValue> inputs = { self, other };
2572 const char* schema_str = "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor";
2573 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2574 }
2575
2576 node = torch::lazy::MakeNode<GtTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
2577 CacheNode(node);
2578 }
2579
2580 auto result = torch::lazy::CreateAtenFromLtcTensor(
2581 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2582 return result;
2583 }
2584
2585
2586 at::Tensor LazyNativeFunctions::hardsigmoid(const at::Tensor & self) {
2587
2588 if (force_eager_fallback(at::aten::hardsigmoid)) {
2589 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(hardsigmoid)>::call(
2590 self
2591 );
2592 }
2593
2594 TORCH_LAZY_FN_COUNTER("lazy::");
2595 auto common_device = torch::lazy::GetBackendDevice(self);
2596 TORCH_INTERNAL_ASSERT(common_device);
2597
2598 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2599 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Hardsigmoid>(lazy_self->GetIrValue());
2600 if (!node) {
2601 auto self_meta = to_meta(self);
2602 auto out_meta = at::meta::hardsigmoid(self_meta);
2603
2604            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2605 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2606 if(torch::lazy::symbolicShapeEnabled()){
2607 std::vector<torch::jit::IValue> inputs = { self };
2608 const char* schema_str = "aten::hardsigmoid(Tensor self) -> Tensor";
2609 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2610 }
2611
2612 node = torch::lazy::MakeNode<Hardsigmoid>(lazy_self->GetIrValue(), std::move(shapes));
2613 CacheNode(node);
2614 }
2615
2616 auto result = torch::lazy::CreateAtenFromLtcTensor(
2617 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2618 return result;
2619 }
2620
2621
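    // Ops without a usable meta kernel derive their output shape from a dedicated
    // torch::lazy::compute_shape_* helper instead of at::meta::*.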
2622 at::Tensor LazyNativeFunctions::index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
2623
2624 if (force_eager_fallback(at::aten::index_select)) {
2625 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(index_select)>::call(
2626 self,
2627 dim,
2628 index
2629 );
2630 }
2631
2632 TORCH_LAZY_FN_COUNTER("lazy::");
2633 auto common_device = torch::lazy::GetBackendDevice(self, index);
2634 TORCH_INTERNAL_ASSERT(common_device);
2635
2636 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2637 LazyTensorPtr lazy_index = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(index, *common_device);
2638 torch::lazy::NodePtr node = torch::lazy::ReuseNode<IndexSelect>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue());
2639 if (!node) {
2640
2641 auto shapes = torch::lazy::compute_shape_index_select(self, dim, index);
2642 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2643 if(torch::lazy::symbolicShapeEnabled()){
2644 std::vector<torch::jit::IValue> inputs = { self, dim, index };
2645 const char* schema_str = "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor";
2646 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2647 }
2648
2649 node = torch::lazy::MakeNode<IndexSelect>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), std::move(shapes));
2650 CacheNode(node);
2651 }
2652
2653 auto result = torch::lazy::CreateAtenFromLtcTensor(
2654 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2655 return result;
2656 }
2657
2658
2659 at::Tensor LazyNativeFunctions::le(const at::Tensor & self, const at::Scalar & other) {
2660
2661 if (force_eager_fallback(at::aten::le)) {
2662 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(le, Scalar)>::call(
2663 self,
2664 other
2665 );
2666 }
2667
2668 TORCH_LAZY_FN_COUNTER("lazy::");
2669 auto common_device = torch::lazy::GetBackendDevice(self);
2670 TORCH_INTERNAL_ASSERT(common_device);
2671
2672 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2673 auto node_other = torch::lazy::LazyGraphExecutor::Get()->
2674 GetIrValueForScalarFromCodegen(other, *common_device);
2675 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LeScalar>(lazy_self->GetIrValue(), node_other);
2676 if (!node) {
2677 auto self_meta = to_meta(self);
2678 auto out_meta = at::meta::le(self_meta, other);
2679
2680            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2681 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2682 if(torch::lazy::symbolicShapeEnabled()){
2683 std::vector<torch::jit::IValue> inputs = { self, other };
2684 const char* schema_str = "aten::le.Scalar(Tensor self, Scalar other) -> Tensor";
2685 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2686 }
2687
2688 node = torch::lazy::MakeNode<LeScalar>(lazy_self->GetIrValue(), node_other, std::move(shapes));
2689 CacheNode(node);
2690 }
2691
2692 auto result = torch::lazy::CreateAtenFromLtcTensor(
2693 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2694 return result;
2695 }
2696
2697
2698 at::Tensor LazyNativeFunctions::le(const at::Tensor & self, const at::Tensor & other) {
2699
2700 if (force_eager_fallback(at::aten::le)) {
2701 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(le, Tensor)>::call(
2702 self,
2703 other
2704 );
2705 }
2706
2707 TORCH_LAZY_FN_COUNTER("lazy::");
2708 auto common_device = torch::lazy::GetBackendDevice(self, other);
2709 TORCH_INTERNAL_ASSERT(common_device);
2710
2711 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2712 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
2713 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LeTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
2714 if (!node) {
2715 auto self_meta = to_meta(self);
2716 auto other_meta = to_meta(other);
2717 auto out_meta = at::meta::le(self_meta, other_meta);
2718
2719            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2720 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2721 if(torch::lazy::symbolicShapeEnabled()){
2722 std::vector<torch::jit::IValue> inputs = { self, other };
2723 const char* schema_str = "aten::le.Tensor(Tensor self, Tensor other) -> Tensor";
2724 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2725 }
2726
2727 node = torch::lazy::MakeNode<LeTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
2728 CacheNode(node);
2729 }
2730
2731 auto result = torch::lazy::CreateAtenFromLtcTensor(
2732 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2733 return result;
2734 }
2735
2736
2737 at::Tensor LazyNativeFunctions::leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
2738
2739 if (force_eager_fallback(at::aten::leaky_relu)) {
2740 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(leaky_relu)>::call(
2741 self,
2742 negative_slope
2743 );
2744 }
2745
2746 TORCH_LAZY_FN_COUNTER("lazy::");
2747 auto common_device = torch::lazy::GetBackendDevice(self);
2748 TORCH_INTERNAL_ASSERT(common_device);
2749
2750 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2751 auto node_negative_slope = torch::lazy::LazyGraphExecutor::Get()->
2752 GetIrValueForScalarFromCodegen(negative_slope, *common_device);
2753 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LeakyRelu>(lazy_self->GetIrValue(), node_negative_slope);
2754 if (!node) {
2755 auto self_meta = to_meta(self);
2756 auto out_meta = at::meta::leaky_relu(self_meta, negative_slope);
2757
2758            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2759 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2760 if(torch::lazy::symbolicShapeEnabled()){
2761 std::vector<torch::jit::IValue> inputs = { self, negative_slope };
2762 const char* schema_str = "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor";
2763 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2764 }
2765
2766 node = torch::lazy::MakeNode<LeakyRelu>(lazy_self->GetIrValue(), node_negative_slope, std::move(shapes));
2767 CacheNode(node);
2768 }
2769
2770 auto result = torch::lazy::CreateAtenFromLtcTensor(
2771 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2772 return result;
2773 }
2774
2775
2776 at::Tensor LazyNativeFunctions::leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
2777
2778 if (force_eager_fallback(at::aten::leaky_relu_backward)) {
2779 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(leaky_relu_backward)>::call(
2780 grad_output,
2781 self,
2782 negative_slope,
2783 self_is_result
2784 );
2785 }
2786
2787 TORCH_LAZY_FN_COUNTER("lazy::");
2788 auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
2789 TORCH_INTERNAL_ASSERT(common_device);
2790
2791 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
2792 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2793 auto node_negative_slope = torch::lazy::LazyGraphExecutor::Get()->
2794 GetIrValueForScalarFromCodegen(negative_slope, *common_device);
2795 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LeakyReluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), node_negative_slope, self_is_result);
2796 if (!node) {
2797 auto grad_output_meta = to_meta(grad_output);
2798 auto self_meta = to_meta(self);
2799 auto out_meta = at::meta::leaky_relu_backward(grad_output_meta, self_meta, negative_slope, self_is_result);
2800
2801            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2802 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2803 if(torch::lazy::symbolicShapeEnabled()){
2804 std::vector<torch::jit::IValue> inputs = { grad_output, self, negative_slope, self_is_result };
2805 const char* schema_str = "aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor";
2806 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2807 }
2808
2809 node = torch::lazy::MakeNode<LeakyReluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), node_negative_slope, self_is_result, std::move(shapes));
2810 CacheNode(node);
2811 }
2812
2813 auto result = torch::lazy::CreateAtenFromLtcTensor(
2814 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2815 return result;
2816 }
2817
2818
2819 at::Tensor LazyNativeFunctions::log(const at::Tensor & self) {
2820
2821 if (force_eager_fallback(at::aten::log)) {
2822 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(log)>::call(
2823 self
2824 );
2825 }
2826
2827 TORCH_LAZY_FN_COUNTER("lazy::");
2828 auto common_device = torch::lazy::GetBackendDevice(self);
2829 TORCH_INTERNAL_ASSERT(common_device);
2830
2831 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2832 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Log>(lazy_self->GetIrValue());
2833 if (!node) {
2834 auto self_meta = to_meta(self);
2835 auto out_meta = at::meta::log(self_meta);
2836
2837            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2838 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2839 if(torch::lazy::symbolicShapeEnabled()){
2840 std::vector<torch::jit::IValue> inputs = { self };
2841 const char* schema_str = "aten::log(Tensor self) -> Tensor";
2842 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2843 }
2844
2845 node = torch::lazy::MakeNode<Log>(lazy_self->GetIrValue(), std::move(shapes));
2846 CacheNode(node);
2847 }
2848
2849 auto result = torch::lazy::CreateAtenFromLtcTensor(
2850 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2851 return result;
2852 }
2853
2854
2855 at::Tensor LazyNativeFunctions::log2(const at::Tensor & self) {
2856
2857 if (force_eager_fallback(at::aten::log2)) {
2858 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(log2)>::call(
2859 self
2860 );
2861 }
2862
2863 TORCH_LAZY_FN_COUNTER("lazy::");
2864 auto common_device = torch::lazy::GetBackendDevice(self);
2865 TORCH_INTERNAL_ASSERT(common_device);
2866
2867 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2868 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Log2>(lazy_self->GetIrValue());
2869 if (!node) {
2870 auto self_meta = to_meta(self);
2871 auto out_meta = at::meta::log2(self_meta);
2872
2873            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
2874 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2875 if(torch::lazy::symbolicShapeEnabled()){
2876 std::vector<torch::jit::IValue> inputs = { self };
2877 const char* schema_str = "aten::log2(Tensor self) -> Tensor";
2878 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2879 }
2880
2881 node = torch::lazy::MakeNode<Log2>(lazy_self->GetIrValue(), std::move(shapes));
2882 CacheNode(node);
2883 }
2884
2885 auto result = torch::lazy::CreateAtenFromLtcTensor(
2886 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2887 return result;
2888 }
2889
2890
2891 at::Tensor LazyNativeFunctions::log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
2892
2893 if (force_eager_fallback(at::aten::log_sigmoid_backward)) {
2894 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(log_sigmoid_backward)>::call(
2895 grad_output,
2896 self,
2897 buffer
2898 );
2899 }
2900
2901 TORCH_LAZY_FN_COUNTER("lazy::");
2902 auto common_device = torch::lazy::GetBackendDevice(grad_output, self, buffer);
2903 TORCH_INTERNAL_ASSERT(common_device);
2904
2905 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
2906 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2907 LazyTensorPtr lazy_buffer = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(buffer, *common_device);
2908 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LogSigmoidBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_buffer->GetIrValue());
2909 if (!node) {
2910
2911 auto shapes = torch::lazy::compute_shape_log_sigmoid_backward(grad_output, self, buffer);
2912 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2913 if(torch::lazy::symbolicShapeEnabled()){
2914 std::vector<torch::jit::IValue> inputs = { grad_output, self, buffer };
2915 const char* schema_str = "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor";
2916 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2917 }
2918
2919 node = torch::lazy::MakeNode<LogSigmoidBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_buffer->GetIrValue(), std::move(shapes));
2920 CacheNode(node);
2921 }
2922
2923 auto result = torch::lazy::CreateAtenFromLtcTensor(
2924 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2925 return result;
2926 }
2927
2928
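    // log_sigmoid is split into forward/backward kernels: the forward returns
    // (output, buffer), and that buffer tensor is the extra operand consumed by
    // log_sigmoid_backward above.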
2929 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::log_sigmoid_forward(const at::Tensor & self) {
2930
2931 if (force_eager_fallback(at::aten::log_sigmoid_forward)) {
2932 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(log_sigmoid_forward)>::call(
2933 self
2934 );
2935 }
2936
2937 TORCH_LAZY_FN_COUNTER("lazy::");
2938 auto common_device = torch::lazy::GetBackendDevice(self);
2939 TORCH_INTERNAL_ASSERT(common_device);
2940
2941 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2942 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LogSigmoidForward>(lazy_self->GetIrValue());
2943 if (!node) {
2944
2945 auto shapes = torch::lazy::compute_shape_log_sigmoid_forward(self);
2946 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
2947 if(torch::lazy::symbolicShapeEnabled()){
2948 std::vector<torch::jit::IValue> inputs = { self };
2949 const char* schema_str = "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)";
2950 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2951 }
2952
2953 node = torch::lazy::MakeNode<LogSigmoidForward>(lazy_self->GetIrValue(), std::move(shapes));
2954 CacheNode(node);
2955 }
2956
2957 std::vector<LazyTensorPtr> lazy_tensors;
2958 for (int i = 0; i < 2; i++) {
2959 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
2960 }
2961 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
2962 return result;
2963 }
2964
2965
2966 at::Tensor LazyNativeFunctions::logdet(const at::Tensor & self) {
2967
2968 if (force_eager_fallback(at::aten::logdet)) {
2969 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(logdet)>::call(
2970 self
2971 );
2972 }
2973
2974 TORCH_LAZY_FN_COUNTER("lazy::");
2975 auto common_device = torch::lazy::GetBackendDevice(self);
2976 TORCH_INTERNAL_ASSERT(common_device);
2977
2978 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
2979 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Logdet>(lazy_self->GetIrValue());
2980 if (!node) {
2981
2982 auto shapes = torch::lazy::compute_shape_logdet(self);
2983 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
2984 if(torch::lazy::symbolicShapeEnabled()){
2985 std::vector<torch::jit::IValue> inputs = { self };
2986 const char* schema_str = "aten::logdet(Tensor self) -> Tensor";
2987 applySymbolicShapesOnLT(schema_str, inputs, shapes);
2988 }
2989
2990 node = torch::lazy::MakeNode<Logdet>(lazy_self->GetIrValue(), std::move(shapes));
2991 CacheNode(node);
2992 }
2993
2994 auto result = torch::lazy::CreateAtenFromLtcTensor(
2995 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
2996 return result;
2997 }
2998
2999
3000 at::Tensor LazyNativeFunctions::lt(const at::Tensor & self, const at::Scalar & other) {
3001
3002 if (force_eager_fallback(at::aten::lt)) {
3003 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(lt, Scalar)>::call(
3004 self,
3005 other
3006 );
3007 }
3008
3009 TORCH_LAZY_FN_COUNTER("lazy::");
3010 auto common_device = torch::lazy::GetBackendDevice(self);
3011 TORCH_INTERNAL_ASSERT(common_device);
3012
3013 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3014 auto node_other = torch::lazy::LazyGraphExecutor::Get()->
3015 GetIrValueForScalarFromCodegen(other, *common_device);
3016 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LtScalar>(lazy_self->GetIrValue(), node_other);
3017 if (!node) {
3018 auto self_meta = to_meta(self);
3019 auto out_meta = at::meta::lt(self_meta, other);
3020
3021            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3022 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3023 if(torch::lazy::symbolicShapeEnabled()){
3024 std::vector<torch::jit::IValue> inputs = { self, other };
3025 const char* schema_str = "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor";
3026 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3027 }
3028
3029 node = torch::lazy::MakeNode<LtScalar>(lazy_self->GetIrValue(), node_other, std::move(shapes));
3030 CacheNode(node);
3031 }
3032
3033 auto result = torch::lazy::CreateAtenFromLtcTensor(
3034 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3035 return result;
3036 }
3037
3038
3039 at::Tensor LazyNativeFunctions::lt(const at::Tensor & self, const at::Tensor & other) {
3040
3041 if (force_eager_fallback(at::aten::lt)) {
3042 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(lt, Tensor)>::call(
3043 self,
3044 other
3045 );
3046 }
3047
3048 TORCH_LAZY_FN_COUNTER("lazy::");
3049 auto common_device = torch::lazy::GetBackendDevice(self, other);
3050 TORCH_INTERNAL_ASSERT(common_device);
3051
3052 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3053 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
3054 torch::lazy::NodePtr node = torch::lazy::ReuseNode<LtTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
3055 if (!node) {
3056 auto self_meta = to_meta(self);
3057 auto other_meta = to_meta(other);
3058 auto out_meta = at::meta::lt(self_meta, other_meta);
3059
3060            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3061 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3062 if(torch::lazy::symbolicShapeEnabled()){
3063 std::vector<torch::jit::IValue> inputs = { self, other };
3064 const char* schema_str = "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor";
3065 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3066 }
3067
3068 node = torch::lazy::MakeNode<LtTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
3069 CacheNode(node);
3070 }
3071
3072 auto result = torch::lazy::CreateAtenFromLtcTensor(
3073 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3074 return result;
3075 }
3076
3077
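    // masked_fill has Scalar and Tensor overloads for `value`; each maps to its own
    // IR node class (MaskedFillScalar / MaskedFillTensor) and selects its eager
    // fallback entry with ATEN_OP2(masked_fill, Scalar) or ATEN_OP2(masked_fill, Tensor).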
3078 at::Tensor LazyNativeFunctions::masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
3079
3080 if (force_eager_fallback(at::aten::masked_fill)) {
3081 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(masked_fill, Scalar)>::call(
3082 self,
3083 mask,
3084 value
3085 );
3086 }
3087
3088 TORCH_LAZY_FN_COUNTER("lazy::");
3089 auto common_device = torch::lazy::GetBackendDevice(self, mask);
3090 TORCH_INTERNAL_ASSERT(common_device);
3091
3092 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3093 LazyTensorPtr lazy_mask = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mask, *common_device);
3094 auto node_value = torch::lazy::LazyGraphExecutor::Get()->
3095 GetIrValueForScalarFromCodegen(value, *common_device);
3096 torch::lazy::NodePtr node = torch::lazy::ReuseNode<MaskedFillScalar>(lazy_self->GetIrValue(), lazy_mask->GetIrValue(), node_value);
3097 if (!node) {
3098
3099 auto shapes = torch::lazy::compute_shape_masked_fill(self, mask, value);
3100 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3101 if(torch::lazy::symbolicShapeEnabled()){
3102 std::vector<torch::jit::IValue> inputs = { self, mask, value };
3103 const char* schema_str = "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor";
3104 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3105 }
3106
3107 node = torch::lazy::MakeNode<MaskedFillScalar>(lazy_self->GetIrValue(), lazy_mask->GetIrValue(), node_value, std::move(shapes));
3108 CacheNode(node);
3109 }
3110
3111 auto result = torch::lazy::CreateAtenFromLtcTensor(
3112 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3113 return result;
3114 }
3115
3116
3117 at::Tensor LazyNativeFunctions::masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
3118
3119 if (force_eager_fallback(at::aten::masked_fill)) {
3120 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(masked_fill, Tensor)>::call(
3121 self,
3122 mask,
3123 value
3124 );
3125 }
3126
3127 TORCH_LAZY_FN_COUNTER("lazy::");
3128 auto common_device = torch::lazy::GetBackendDevice(self, mask, value);
3129 TORCH_INTERNAL_ASSERT(common_device);
3130
3131 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3132 LazyTensorPtr lazy_mask = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mask, *common_device);
3133 LazyTensorPtr lazy_value = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(value, *common_device);
3134 torch::lazy::NodePtr node = torch::lazy::ReuseNode<MaskedFillTensor>(lazy_self->GetIrValue(), lazy_mask->GetIrValue(), lazy_value->GetIrValue());
3135 if (!node) {
3136
3137 auto shapes = torch::lazy::compute_shape_masked_fill(self, mask, value);
3138 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3139 if(torch::lazy::symbolicShapeEnabled()){
3140 std::vector<torch::jit::IValue> inputs = { self, mask, value };
3141 const char* schema_str = "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor";
3142 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3143 }
3144
3145 node = torch::lazy::MakeNode<MaskedFillTensor>(lazy_self->GetIrValue(), lazy_mask->GetIrValue(), lazy_value->GetIrValue(), std::move(shapes));
3146 CacheNode(node);
3147 }
3148
3149 auto result = torch::lazy::CreateAtenFromLtcTensor(
3150 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3151 return result;
3152 }
3153
3154
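    // When the meta kernel returns a tuple (values, indices), one torch::lazy::Shape
    // is built per element from its scalar_type() and sizes() before the shape list
    // is handed to the IR node.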
3155 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::max(const at::Tensor & self, int64_t dim, bool keepdim) {
3156
3157 if (force_eager_fallback(at::aten::max)) {
3158 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(max, dim)>::call(
3159 self,
3160 dim,
3161 keepdim
3162 );
3163 }
3164
3165 TORCH_LAZY_FN_COUNTER("lazy::");
3166 auto common_device = torch::lazy::GetBackendDevice(self);
3167 TORCH_INTERNAL_ASSERT(common_device);
3168
3169 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3170 torch::lazy::NodePtr node = torch::lazy::ReuseNode<MaxDim>(lazy_self->GetIrValue(), dim, keepdim);
3171 if (!node) {
3172 auto self_meta = to_meta(self);
3173 auto out_meta = at::meta::max(self_meta, dim, keepdim);
3174 std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(std::get<0>(out_meta).scalar_type(), std::get<0>(out_meta).sizes().vec()),torch::lazy::Shape(std::get<1>(out_meta).scalar_type(), std::get<1>(out_meta).sizes().vec())};
3175 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
3176 if(torch::lazy::symbolicShapeEnabled()){
3177 std::vector<torch::jit::IValue> inputs = { self, dim, keepdim };
3178 const char* schema_str = "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)";
3179 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3180 }
3181
3182 node = torch::lazy::MakeNode<MaxDim>(lazy_self->GetIrValue(), dim, keepdim, std::move(shapes));
3183 CacheNode(node);
3184 }
3185
3186 std::vector<LazyTensorPtr> lazy_tensors;
3187 for (int i = 0; i < 2; i++) {
3188 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
3189 }
3190 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
3191 return result;
3192 }
3193
3194
3195 at::Tensor LazyNativeFunctions::max(const at::Tensor & self) {
3196
3197 if (force_eager_fallback(at::aten::max)) {
3198 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(max)>::call(
3199 self
3200 );
3201 }
3202
3203 TORCH_LAZY_FN_COUNTER("lazy::");
3204 auto common_device = torch::lazy::GetBackendDevice(self);
3205 TORCH_INTERNAL_ASSERT(common_device);
3206
3207 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3208 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Max>(lazy_self->GetIrValue());
3209 if (!node) {
3210
3211 auto shapes = torch::lazy::compute_shape_max(self);
3212 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3213 if(torch::lazy::symbolicShapeEnabled()){
3214 std::vector<torch::jit::IValue> inputs = { self };
3215 const char* schema_str = "aten::max(Tensor self) -> Tensor";
3216 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3217 }
3218
3219 node = torch::lazy::MakeNode<Max>(lazy_self->GetIrValue(), std::move(shapes));
3220 CacheNode(node);
3221 }
3222
3223 auto result = torch::lazy::CreateAtenFromLtcTensor(
3224 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3225 return result;
3226 }
3227
3228
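    // Note: the at::IntArrayRef arguments (kernel_size, stride, padding, dilation)
    // are copied into std::vector<int64_t>, presumably so the IR node holds owning
    // copies rather than the non-owning ArrayRef views passed into this call.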
3229 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
3230
3231 if (force_eager_fallback(at::aten::max_pool2d_with_indices)) {
3232 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(max_pool2d_with_indices)>::call(
3233 self,
3234 kernel_size,
3235 stride,
3236 padding,
3237 dilation,
3238 ceil_mode
3239 );
3240 }
3241
3242 TORCH_LAZY_FN_COUNTER("lazy::");
3243 auto common_device = torch::lazy::GetBackendDevice(self);
3244 TORCH_INTERNAL_ASSERT(common_device);
3245
3246 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3247 torch::lazy::NodePtr node = torch::lazy::ReuseNode<MaxPool2dWithIndices>(lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), ceil_mode);
3248 if (!node) {
3249 auto self_meta = to_meta(self);
3250 auto out_meta = at::meta::max_pool2d_with_indices(self_meta, kernel_size, stride, padding, dilation, ceil_mode);
3251 std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(std::get<0>(out_meta).scalar_type(), std::get<0>(out_meta).sizes().vec()),torch::lazy::Shape(std::get<1>(out_meta).scalar_type(), std::get<1>(out_meta).sizes().vec())};
3252 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
3253 if(torch::lazy::symbolicShapeEnabled()){
3254 std::vector<torch::jit::IValue> inputs = { self, kernel_size, stride, padding, dilation, ceil_mode };
3255 const char* schema_str = "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)";
3256 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3257 }
3258
3259 node = torch::lazy::MakeNode<MaxPool2dWithIndices>(lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), ceil_mode, std::move(shapes));
3260 CacheNode(node);
3261 }
3262
3263 std::vector<LazyTensorPtr> lazy_tensors;
3264 for (int i = 0; i < 2; i++) {
3265 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
3266 }
3267 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
3268 return result;
3269 }
3270
3271
3272 at::Tensor LazyNativeFunctions::max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
3273
3274 if (force_eager_fallback(at::aten::max_pool2d_with_indices_backward)) {
3275 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(max_pool2d_with_indices_backward)>::call(
3276 grad_output,
3277 self,
3278 kernel_size,
3279 stride,
3280 padding,
3281 dilation,
3282 ceil_mode,
3283 indices
3284 );
3285 }
3286
3287 TORCH_LAZY_FN_COUNTER("lazy::");
3288 auto common_device = torch::lazy::GetBackendDevice(grad_output, self, indices);
3289 TORCH_INTERNAL_ASSERT(common_device);
3290
3291 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
3292 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3293 LazyTensorPtr lazy_indices = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(indices, *common_device);
3294 torch::lazy::NodePtr node = torch::lazy::ReuseNode<MaxPool2dWithIndicesBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), ceil_mode, lazy_indices->GetIrValue());
3295 if (!node) {
3296 auto grad_output_meta = to_meta(grad_output);
3297 auto self_meta = to_meta(self);
3298 auto indices_meta = to_meta(indices);
3299 auto out_meta = at::meta::max_pool2d_with_indices_backward(grad_output_meta, self_meta, kernel_size, stride, padding, dilation, ceil_mode, indices_meta);
3300
3301            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3302 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3303 if(torch::lazy::symbolicShapeEnabled()){
3304 std::vector<torch::jit::IValue> inputs = { grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices };
3305 const char* schema_str = "aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor";
3306 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3307 }
3308
3309 node = torch::lazy::MakeNode<MaxPool2dWithIndicesBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), ceil_mode, lazy_indices->GetIrValue(), std::move(shapes));
3310 CacheNode(node);
3311 }
3312
3313 auto result = torch::lazy::CreateAtenFromLtcTensor(
3314 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3315 return result;
3316 }
3317
3318
3319 at::Tensor LazyNativeFunctions::maximum(const at::Tensor & self, const at::Tensor & other) {
3320
3321 if (force_eager_fallback(at::aten::maximum)) {
3322 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(maximum)>::call(
3323 self,
3324 other
3325 );
3326 }
3327
3328 TORCH_LAZY_FN_COUNTER("lazy::");
3329 auto common_device = torch::lazy::GetBackendDevice(self, other);
3330 TORCH_INTERNAL_ASSERT(common_device);
3331
3332 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3333 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
3334 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Maximum>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
3335 if (!node) {
3336 auto self_meta = to_meta(self);
3337 auto other_meta = to_meta(other);
3338 auto out_meta = at::meta::maximum(self_meta, other_meta);
3339
3340            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3341 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3342 if(torch::lazy::symbolicShapeEnabled()){
3343 std::vector<torch::jit::IValue> inputs = { self, other };
3344 const char* schema_str = "aten::maximum(Tensor self, Tensor other) -> Tensor";
3345 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3346 }
3347
3348 node = torch::lazy::MakeNode<Maximum>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
3349 CacheNode(node);
3350 }
3351
3352 auto result = torch::lazy::CreateAtenFromLtcTensor(
3353 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3354 return result;
3355 }
3356
3357
3358 at::Tensor LazyNativeFunctions::mean(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
3359
3360 if (force_eager_fallback(at::aten::mean)) {
3361 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(mean)>::call(
3362 self,
3363 dtype
3364 );
3365 }
3366
3367 TORCH_LAZY_FN_COUNTER("lazy::");
3368 auto common_device = torch::lazy::GetBackendDevice(self);
3369 TORCH_INTERNAL_ASSERT(common_device);
3370
3371 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3372 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Mean>(lazy_self->GetIrValue(), dtype);
3373 if (!node) {
3374
3375 auto shapes = torch::lazy::compute_shape_mean(self, dtype);
3376 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3377 if(torch::lazy::symbolicShapeEnabled()){
3378 std::vector<torch::jit::IValue> inputs = { self, dtype };
3379 const char* schema_str = "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor";
3380 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3381 }
3382
3383 node = torch::lazy::MakeNode<Mean>(lazy_self->GetIrValue(), dtype, std::move(shapes));
3384 CacheNode(node);
3385 }
3386
3387 auto result = torch::lazy::CreateAtenFromLtcTensor(
3388 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3389 return result;
3390 }
3391
3392
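    // The optional dim list (at::OptionalIntArrayRef) is normalized with
    // torch::lazy::ToOptionalVector<int64_t> before being stored on the MeanDim node.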
3393 at::Tensor LazyNativeFunctions::mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
3394
3395 if (force_eager_fallback(at::aten::mean)) {
3396 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(mean, dim)>::call(
3397 self,
3398 dim,
3399 keepdim,
3400 dtype
3401 );
3402 }
3403
3404 TORCH_LAZY_FN_COUNTER("lazy::");
3405 auto common_device = torch::lazy::GetBackendDevice(self);
3406 TORCH_INTERNAL_ASSERT(common_device);
3407
3408 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3409 torch::lazy::NodePtr node = torch::lazy::ReuseNode<MeanDim>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), keepdim, dtype);
3410 if (!node) {
3411 auto self_meta = to_meta(self);
3412 auto out_meta = at::meta::mean(self_meta, dim, keepdim, dtype);
3413
3414            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3415 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3416 if(torch::lazy::symbolicShapeEnabled()){
3417 std::vector<torch::jit::IValue> inputs = { self, dim, keepdim, dtype };
3418 const char* schema_str = "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
3419 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3420 }
3421
3422 node = torch::lazy::MakeNode<MeanDim>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), keepdim, dtype, std::move(shapes));
3423 CacheNode(node);
3424 }
3425
3426 auto result = torch::lazy::CreateAtenFromLtcTensor(
3427 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3428 return result;
3429 }
3430
3431
3432 at::Tensor LazyNativeFunctions::min(const at::Tensor & self) {
3433
3434 if (force_eager_fallback(at::aten::min)) {
3435 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(min)>::call(
3436 self
3437 );
3438 }
3439
3440 TORCH_LAZY_FN_COUNTER("lazy::");
3441 auto common_device = torch::lazy::GetBackendDevice(self);
3442 TORCH_INTERNAL_ASSERT(common_device);
3443
3444 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3445 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Min>(lazy_self->GetIrValue());
3446 if (!node) {
3447
3448 auto shapes = torch::lazy::compute_shape_min(self);
3449 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3450 if(torch::lazy::symbolicShapeEnabled()){
3451 std::vector<torch::jit::IValue> inputs = { self };
3452 const char* schema_str = "aten::min(Tensor self) -> Tensor";
3453 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3454 }
3455
3456 node = torch::lazy::MakeNode<Min>(lazy_self->GetIrValue(), std::move(shapes));
3457 CacheNode(node);
3458 }
3459
3460 auto result = torch::lazy::CreateAtenFromLtcTensor(
3461 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3462 return result;
3463 }
3464
3465
3466 at::Tensor LazyNativeFunctions::minimum(const at::Tensor & self, const at::Tensor & other) {
3467
3468 if (force_eager_fallback(at::aten::minimum)) {
3469 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(minimum)>::call(
3470 self,
3471 other
3472 );
3473 }
3474
3475 TORCH_LAZY_FN_COUNTER("lazy::");
3476 auto common_device = torch::lazy::GetBackendDevice(self, other);
3477 TORCH_INTERNAL_ASSERT(common_device);
3478
3479 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3480 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
3481 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Minimum>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
3482 if (!node) {
3483 auto self_meta = to_meta(self);
3484 auto other_meta = to_meta(other);
3485 auto out_meta = at::meta::minimum(self_meta, other_meta);
3486
3487            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3488 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3489 if(torch::lazy::symbolicShapeEnabled()){
3490 std::vector<torch::jit::IValue> inputs = { self, other };
3491 const char* schema_str = "aten::minimum(Tensor self, Tensor other) -> Tensor";
3492 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3493 }
3494
3495 node = torch::lazy::MakeNode<Minimum>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
3496 CacheNode(node);
3497 }
3498
3499 auto result = torch::lazy::CreateAtenFromLtcTensor(
3500 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3501 return result;
3502 }
3503
3504
3505 at::Tensor LazyNativeFunctions::mm(const at::Tensor & self, const at::Tensor & mat2) {
3506
3507 if (force_eager_fallback(at::aten::mm)) {
3508 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(mm)>::call(
3509 self,
3510 mat2
3511 );
3512 }
3513
3514 TORCH_LAZY_FN_COUNTER("lazy::");
3515 auto common_device = torch::lazy::GetBackendDevice(self, mat2);
3516 TORCH_INTERNAL_ASSERT(common_device);
3517
3518 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3519 LazyTensorPtr lazy_mat2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mat2, *common_device);
3520 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Mm>(lazy_self->GetIrValue(), lazy_mat2->GetIrValue());
3521 if (!node) {
3522 auto self_meta = to_meta(self);
3523 auto mat2_meta = to_meta(mat2);
3524 auto out_meta = at::meta::mm(self_meta, mat2_meta);
3525
3526            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3527 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3528 if(torch::lazy::symbolicShapeEnabled()){
3529 std::vector<torch::jit::IValue> inputs = { self, mat2 };
3530 const char* schema_str = "aten::mm(Tensor self, Tensor mat2) -> Tensor";
3531 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3532 }
3533
3534 node = torch::lazy::MakeNode<Mm>(lazy_self->GetIrValue(), lazy_mat2->GetIrValue(), std::move(shapes));
3535 CacheNode(node);
3536 }
3537
3538 auto result = torch::lazy::CreateAtenFromLtcTensor(
3539 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3540 return result;
3541 }
3542
3543
3544 at::Tensor LazyNativeFunctions::mul(const at::Tensor & self, const at::Tensor & other) {
3545
3546 if (force_eager_fallback(at::aten::mul)) {
3547 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(mul, Tensor)>::call(
3548 self,
3549 other
3550 );
3551 }
3552
3553 TORCH_LAZY_FN_COUNTER("lazy::");
3554 auto common_device = torch::lazy::GetBackendDevice(self, other);
3555 TORCH_INTERNAL_ASSERT(common_device);
3556
3557 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3558 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
3559 torch::lazy::NodePtr node = torch::lazy::ReuseNode<MulTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
3560 if (!node) {
3561 auto self_meta = to_meta(self);
3562 auto other_meta = to_meta(other);
3563 auto out_meta = at::meta::mul(self_meta, other_meta);
3564
3565            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3566 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3567 if(torch::lazy::symbolicShapeEnabled()){
3568 std::vector<torch::jit::IValue> inputs = { self, other };
3569 const char* schema_str = "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor";
3570 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3571 }
3572
3573 node = torch::lazy::MakeNode<MulTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
3574 CacheNode(node);
3575 }
3576
3577 auto result = torch::lazy::CreateAtenFromLtcTensor(
3578 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3579 return result;
3580 }
3581
3582
3583 at::Tensor LazyNativeFunctions::mv(const at::Tensor & self, const at::Tensor & vec) {
3584
3585 if (force_eager_fallback(at::aten::mv)) {
3586 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(mv)>::call(
3587 self,
3588 vec
3589 );
3590 }
3591
3592 TORCH_LAZY_FN_COUNTER("lazy::");
3593 auto common_device = torch::lazy::GetBackendDevice(self, vec);
3594 TORCH_INTERNAL_ASSERT(common_device);
3595
3596 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3597 LazyTensorPtr lazy_vec = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(vec, *common_device);
3598 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Mv>(lazy_self->GetIrValue(), lazy_vec->GetIrValue());
3599 if (!node) {
3600
3601 auto shapes = torch::lazy::compute_shape_mv(self, vec);
3602 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3603 if(torch::lazy::symbolicShapeEnabled()){
3604 std::vector<torch::jit::IValue> inputs = { self, vec };
3605 const char* schema_str = "aten::mv(Tensor self, Tensor vec) -> Tensor";
3606 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3607 }
3608
3609 node = torch::lazy::MakeNode<Mv>(lazy_self->GetIrValue(), lazy_vec->GetIrValue(), std::move(shapes));
3610 CacheNode(node);
3611 }
3612
3613 auto result = torch::lazy::CreateAtenFromLtcTensor(
3614 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3615 return result;
3616 }
3617
3618
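    // Optional tensor arguments go through TryGetLtcTensor(arg.value_or(at::Tensor()));
    // an absent or undefined tensor yields a null LazyTensorPtr, which is mapped to
    // c10::nullopt in the IR node's operand list.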
3619 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::native_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
3620
3621 if (force_eager_fallback(at::aten::native_batch_norm)) {
3622 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_batch_norm)>::call(
3623 input,
3624 weight,
3625 bias,
3626 running_mean,
3627 running_var,
3628 training,
3629 momentum,
3630 eps
3631 );
3632 }
3633
3634 TORCH_LAZY_FN_COUNTER("lazy::");
3635 auto common_device = torch::lazy::GetBackendDevice(input, weight, bias, running_mean, running_var);
3636 TORCH_INTERNAL_ASSERT(common_device);
3637
3638 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
3639 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
3640 LazyTensorPtr lazy_bias = torch::lazy::TryGetLtcTensor(bias.value_or(at::Tensor()));
3641 LazyTensorPtr lazy_running_mean = torch::lazy::TryGetLtcTensor(running_mean.value_or(at::Tensor()));
3642 LazyTensorPtr lazy_running_var = torch::lazy::TryGetLtcTensor(running_var.value_or(at::Tensor()));
3643 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeBatchNorm>(lazy_input->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, lazy_bias ? c10::make_optional(lazy_bias->GetIrValue()) : c10::nullopt, lazy_running_mean ? c10::make_optional(lazy_running_mean->GetIrValue()) : c10::nullopt, lazy_running_var ? c10::make_optional(lazy_running_var->GetIrValue()) : c10::nullopt, training, momentum, eps);
3644 if (!node) {
3645
3646 auto shapes = torch::lazy::compute_shape_native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps);
3647 TORCH_INTERNAL_ASSERT(shapes.size() == 3);
3648 if(torch::lazy::symbolicShapeEnabled()){
3649 std::vector<torch::jit::IValue> inputs = { input, weight, bias, running_mean, running_var, training, momentum, eps };
3650 const char* schema_str = "aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)";
3651 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3652 }
3653
3654 node = torch::lazy::MakeNode<NativeBatchNorm>(lazy_input->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, lazy_bias ? c10::make_optional(lazy_bias->GetIrValue()) : c10::nullopt, lazy_running_mean ? c10::make_optional(lazy_running_mean->GetIrValue()) : c10::nullopt, lazy_running_var ? c10::make_optional(lazy_running_var->GetIrValue()) : c10::nullopt, training, momentum, eps, std::move(shapes));
3655 CacheNode(node);
3656 }
3657
3658 std::vector<LazyTensorPtr> lazy_tensors;
3659 for (int i = 0; i < 3; i++) {
3660 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
3661 }
3662 auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
3663 return result;
3664 }
3665
3666
3667 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
3668
3669 if (force_eager_fallback(at::aten::native_batch_norm_backward)) {
3670 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_batch_norm_backward)>::call(
3671 grad_out,
3672 input,
3673 weight,
3674 running_mean,
3675 running_var,
3676 save_mean,
3677 save_invstd,
3678 train,
3679 eps,
3680 output_mask
3681 );
3682 }
3683
3684 TORCH_LAZY_FN_COUNTER("lazy::");
3685 auto common_device = torch::lazy::GetBackendDevice(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd);
3686 TORCH_INTERNAL_ASSERT(common_device);
3687
3688 LazyTensorPtr lazy_grad_out = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_out, *common_device);
3689 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
3690 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
3691 LazyTensorPtr lazy_running_mean = torch::lazy::TryGetLtcTensor(running_mean.value_or(at::Tensor()));
3692 LazyTensorPtr lazy_running_var = torch::lazy::TryGetLtcTensor(running_var.value_or(at::Tensor()));
3693 LazyTensorPtr lazy_save_mean = torch::lazy::TryGetLtcTensor(save_mean.value_or(at::Tensor()));
3694 LazyTensorPtr lazy_save_invstd = torch::lazy::TryGetLtcTensor(save_invstd.value_or(at::Tensor()));
3695 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeBatchNormBackward>(lazy_grad_out->GetIrValue(), lazy_input->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, lazy_running_mean ? c10::make_optional(lazy_running_mean->GetIrValue()) : c10::nullopt, lazy_running_var ? c10::make_optional(lazy_running_var->GetIrValue()) : c10::nullopt, lazy_save_mean ? c10::make_optional(lazy_save_mean->GetIrValue()) : c10::nullopt, lazy_save_invstd ? c10::make_optional(lazy_save_invstd->GetIrValue()) : c10::nullopt, train, eps, std::vector<bool>(output_mask.begin(), output_mask.end()));
3696 if (!node) {
3697
3698 auto shapes = torch::lazy::compute_shape_native_batch_norm_backward(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
3699 TORCH_INTERNAL_ASSERT(shapes.size() == 3);
3700 if(torch::lazy::symbolicShapeEnabled()){
3701 std::vector<torch::jit::IValue> inputs = { grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask };
3702 const char* schema_str = "aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)";
3703 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3704 }
3705
3706 node = torch::lazy::MakeNode<NativeBatchNormBackward>(lazy_grad_out->GetIrValue(), lazy_input->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, lazy_running_mean ? c10::make_optional(lazy_running_mean->GetIrValue()) : c10::nullopt, lazy_running_var ? c10::make_optional(lazy_running_var->GetIrValue()) : c10::nullopt, lazy_save_mean ? c10::make_optional(lazy_save_mean->GetIrValue()) : c10::nullopt, lazy_save_invstd ? c10::make_optional(lazy_save_invstd->GetIrValue()) : c10::nullopt, train, eps, std::vector<bool>(output_mask.begin(), output_mask.end()), std::move(shapes));
3707 CacheNode(node);
3708 }
3709
3710 std::vector<LazyTensorPtr> lazy_tensors;
3711 for (int i = 0; i < 3; i++) {
3712 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
3713 }
3714 auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
3715 return result;
3716 }
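    // Illustrative usage sketch (an assumption-labelled example, not generated code; it presumes
    // lazy-device tensors with these names already exist): multi-output ops like this one trace a
    // single IR node and expose each output as torch::lazy::Value(node, i) wrapped back into an
    // at::Tensor, so a caller unpacks the tuple exactly as on an eager backend:
    //
    //   auto grads = at::native_batch_norm_backward(
    //       grad_out, input, weight, running_mean, running_var, save_mean, save_invstd,
    //       /*train=*/true, /*eps=*/1e-5, /*output_mask=*/{true, true, true});
    //   at::Tensor grad_input  = std::get<0>(grads);   // Value(node, 0)
    //   at::Tensor grad_weight = std::get<1>(grads);   // Value(node, 1)
    //   at::Tensor grad_bias   = std::get<2>(grads);   // Value(node, 2)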
3717
3718
3719 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::native_dropout(const at::Tensor & input, double p, c10::optional<bool> train) {
3720
3721 if (force_eager_fallback(at::aten::native_dropout)) {
3722 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_dropout)>::call(
3723 input,
3724 p,
3725 train
3726 );
3727 }
3728
3729 TORCH_LAZY_FN_COUNTER("lazy::");
3730 auto common_device = torch::lazy::GetBackendDevice(input);
3731 TORCH_INTERNAL_ASSERT(common_device);
3732
3733 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
3734 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeDropout>(lazy_input->GetIrValue(), p, train);
3735 if (!node) {
3736
3737 auto shapes = torch::lazy::compute_shape_native_dropout(input, p, train);
3738 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
3739 if(torch::lazy::symbolicShapeEnabled()){
3740 std::vector<torch::jit::IValue> inputs = { input, p, train };
3741 const char* schema_str = "aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)";
3742 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3743 }
3744
3745 node = torch::lazy::MakeNode<NativeDropout>(lazy_input->GetIrValue(), p, train, std::move(shapes));
3746 CacheNode(node);
3747 }
3748
3749 std::vector<LazyTensorPtr> lazy_tensors;
3750 for (int i = 0; i < 2; i++) {
3751 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
3752 }
3753 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
3754 return result;
3755 }
3756
3757
3758 at::Tensor LazyNativeFunctions::native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
3759
3760 if (force_eager_fallback(at::aten::native_dropout_backward)) {
3761 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_dropout_backward)>::call(
3762 grad_output,
3763 mask,
3764 scale
3765 );
3766 }
3767
3768 TORCH_LAZY_FN_COUNTER("lazy::");
3769 auto common_device = torch::lazy::GetBackendDevice(grad_output, mask);
3770 TORCH_INTERNAL_ASSERT(common_device);
3771
3772 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
3773 LazyTensorPtr lazy_mask = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mask, *common_device);
3774 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeDropoutBackward>(lazy_grad_output->GetIrValue(), lazy_mask->GetIrValue(), scale);
3775 if (!node) {
3776
3777 auto shapes = torch::lazy::compute_shape_native_dropout_backward(grad_output, mask, scale);
3778 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3779 if(torch::lazy::symbolicShapeEnabled()){
3780 std::vector<torch::jit::IValue> inputs = { grad_output, mask, scale };
3781 const char* schema_str = "aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor";
3782 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3783 }
3784
3785 node = torch::lazy::MakeNode<NativeDropoutBackward>(lazy_grad_output->GetIrValue(), lazy_mask->GetIrValue(), scale, std::move(shapes));
3786 CacheNode(node);
3787 }
3788
3789 auto result = torch::lazy::CreateAtenFromLtcTensor(
3790 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3791 return result;
3792 }
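    // Debugging sketch (an assumption about the TorchScript backend, not generated code):
    // force_eager_fallback() lets an individual op be diverted to the CPU fallback branch above
    // instead of being lowered, which helps bisect a suspect lowering. In current builds this is
    // keyed off the LTC_FORCE_FALLBACK environment variable, e.g.
    //
    //   LTC_FORCE_FALLBACK=aten::native_dropout_backward python repro.py
    //
    // while every other op in this file stays on the traced path.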
3793
3794
3795 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
3796
3797 if (force_eager_fallback(at::aten::native_layer_norm)) {
3798 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_layer_norm)>::call(
3799 input,
3800 c10::fromIntArrayRefSlow(normalized_shape),
3801 weight,
3802 bias,
3803 eps
3804 );
3805 }
3806
3807 TORCH_LAZY_FN_COUNTER("lazy::");
3808 auto common_device = torch::lazy::GetBackendDevice(input, weight, bias);
3809 TORCH_INTERNAL_ASSERT(common_device);
3810
3811 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
3812 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
3813 LazyTensorPtr lazy_bias = torch::lazy::TryGetLtcTensor(bias.value_or(at::Tensor()));
3814 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeLayerNorm>(lazy_input->GetIrValue(), std::vector<int64_t>(normalized_shape.begin(), normalized_shape.end()), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, lazy_bias ? c10::make_optional(lazy_bias->GetIrValue()) : c10::nullopt, eps);
3815 if (!node) {
3816
3817 auto shapes = torch::lazy::compute_shape_native_layer_norm(input, normalized_shape, weight, bias, eps);
3818 TORCH_INTERNAL_ASSERT(shapes.size() == 3);
3819 if(torch::lazy::symbolicShapeEnabled()){
3820 std::vector<torch::jit::IValue> inputs = { input, normalized_shape, weight, bias, eps };
3821 const char* schema_str = "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)";
3822 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3823 }
3824
3825 node = torch::lazy::MakeNode<NativeLayerNorm>(lazy_input->GetIrValue(), std::vector<int64_t>(normalized_shape.begin(), normalized_shape.end()), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, lazy_bias ? c10::make_optional(lazy_bias->GetIrValue()) : c10::nullopt, eps, std::move(shapes));
3826 CacheNode(node);
3827 }
3828
3829 std::vector<LazyTensorPtr> lazy_tensors;
3830 for (int i = 0; i < 3; i++) {
3831 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
3832 }
3833 auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
3834 return result;
3835 }
3836
3837
3838 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
3839
3840 if (force_eager_fallback(at::aten::native_layer_norm_backward)) {
3841 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_layer_norm_backward)>::call(
3842 grad_out,
3843 input,
3844 c10::fromIntArrayRefSlow(normalized_shape),
3845 mean,
3846 rstd,
3847 weight,
3848 bias,
3849 output_mask
3850 );
3851 }
3852
3853 TORCH_LAZY_FN_COUNTER("lazy::");
3854 auto common_device = torch::lazy::GetBackendDevice(grad_out, input, mean, rstd, weight, bias);
3855 TORCH_INTERNAL_ASSERT(common_device);
3856
3857 LazyTensorPtr lazy_grad_out = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_out, *common_device);
3858 LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
3859 LazyTensorPtr lazy_mean = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mean, *common_device);
3860 LazyTensorPtr lazy_rstd = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(rstd, *common_device);
3861 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
3862 LazyTensorPtr lazy_bias = torch::lazy::TryGetLtcTensor(bias.value_or(at::Tensor()));
3863 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeLayerNormBackward>(lazy_grad_out->GetIrValue(), lazy_input->GetIrValue(), std::vector<int64_t>(normalized_shape.begin(), normalized_shape.end()), lazy_mean->GetIrValue(), lazy_rstd->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, lazy_bias ? c10::make_optional(lazy_bias->GetIrValue()) : c10::nullopt, std::vector<bool>(output_mask.begin(), output_mask.end()));
3864 if (!node) {
3865
3866 auto shapes = torch::lazy::compute_shape_native_layer_norm_backward(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
3867 TORCH_INTERNAL_ASSERT(shapes.size() == 3);
3868 if(torch::lazy::symbolicShapeEnabled()){
3869 std::vector<torch::jit::IValue> inputs = { grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask };
3870 const char* schema_str = "aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)";
3871 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3872 }
3873
3874 node = torch::lazy::MakeNode<NativeLayerNormBackward>(lazy_grad_out->GetIrValue(), lazy_input->GetIrValue(), std::vector<int64_t>(normalized_shape.begin(), normalized_shape.end()), lazy_mean->GetIrValue(), lazy_rstd->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, lazy_bias ? c10::make_optional(lazy_bias->GetIrValue()) : c10::nullopt, std::vector<bool>(output_mask.begin(), output_mask.end()), std::move(shapes));
3875 CacheNode(node);
3876 }
3877
3878 std::vector<LazyTensorPtr> lazy_tensors;
3879 for (int i = 0; i < 3; i++) {
3880 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
3881 }
3882 auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
3883 return result;
3884 }
3885
3886
3887 at::Tensor LazyNativeFunctions::ne(const at::Tensor & self, const at::Scalar & other) {
3888
3889 if (force_eager_fallback(at::aten::ne)) {
3890 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(ne, Scalar)>::call(
3891 self,
3892 other
3893 );
3894 }
3895
3896 TORCH_LAZY_FN_COUNTER("lazy::");
3897 auto common_device = torch::lazy::GetBackendDevice(self);
3898 TORCH_INTERNAL_ASSERT(common_device);
3899
3900 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3901 auto node_other = torch::lazy::LazyGraphExecutor::Get()->
3902 GetIrValueForScalarFromCodegen(other, *common_device);
3903 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NeScalar>(lazy_self->GetIrValue(), node_other);
3904 if (!node) {
3905 auto self_meta = to_meta(self);
3906 auto out_meta = at::meta::ne(self_meta, other);
3907
3908            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3909 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3910 if(torch::lazy::symbolicShapeEnabled()){
3911 std::vector<torch::jit::IValue> inputs = { self, other };
3912 const char* schema_str = "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor";
3913 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3914 }
3915
3916 node = torch::lazy::MakeNode<NeScalar>(lazy_self->GetIrValue(), node_other, std::move(shapes));
3917 CacheNode(node);
3918 }
3919
3920 auto result = torch::lazy::CreateAtenFromLtcTensor(
3921 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3922 return result;
3923 }
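    // Shape-inference sketch (assumes structured at::meta kernels compute only output metadata,
    // never data): the to_meta()/at::meta::ne() round trip above amounts to asking "what dtype and
    // sizes would eager ne() return?", e.g.
    //
    //   at::Tensor self_meta = at::empty({4, 5},
    //       at::TensorOptions().device(c10::kMeta).dtype(at::kFloat));
    //   at::Tensor out_meta  = at::meta::ne(self_meta, /*other=*/0);
    //   torch::lazy::Shape shape(out_meta.scalar_type(), out_meta.sizes().vec());
    //   // comparison ops promote to bool: shape == Shape(at::kBool, {4, 5})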
3924
3925
3926 at::Tensor LazyNativeFunctions::ne(const at::Tensor & self, const at::Tensor & other) {
3927
3928 if (force_eager_fallback(at::aten::ne)) {
3929 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(ne, Tensor)>::call(
3930 self,
3931 other
3932 );
3933 }
3934
3935 TORCH_LAZY_FN_COUNTER("lazy::");
3936 auto common_device = torch::lazy::GetBackendDevice(self, other);
3937 TORCH_INTERNAL_ASSERT(common_device);
3938
3939 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3940 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
3941 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NeTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
3942 if (!node) {
3943 auto self_meta = to_meta(self);
3944 auto other_meta = to_meta(other);
3945 auto out_meta = at::meta::ne(self_meta, other_meta);
3946
3947            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3948 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3949 if(torch::lazy::symbolicShapeEnabled()){
3950 std::vector<torch::jit::IValue> inputs = { self, other };
3951 const char* schema_str = "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor";
3952 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3953 }
3954
3955 node = torch::lazy::MakeNode<NeTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
3956 CacheNode(node);
3957 }
3958
3959 auto result = torch::lazy::CreateAtenFromLtcTensor(
3960 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3961 return result;
3962 }
3963
3964
3965 at::Tensor LazyNativeFunctions::neg(const at::Tensor & self) {
3966
3967 if (force_eager_fallback(at::aten::neg)) {
3968 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(neg)>::call(
3969 self
3970 );
3971 }
3972
3973 TORCH_LAZY_FN_COUNTER("lazy::");
3974 auto common_device = torch::lazy::GetBackendDevice(self);
3975 TORCH_INTERNAL_ASSERT(common_device);
3976
3977 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
3978 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Neg>(lazy_self->GetIrValue());
3979 if (!node) {
3980 auto self_meta = to_meta(self);
3981 auto out_meta = at::meta::neg(self_meta);
3982
3983            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
3984 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
3985 if(torch::lazy::symbolicShapeEnabled()){
3986 std::vector<torch::jit::IValue> inputs = { self };
3987 const char* schema_str = "aten::neg(Tensor self) -> Tensor";
3988 applySymbolicShapesOnLT(schema_str, inputs, shapes);
3989 }
3990
3991 node = torch::lazy::MakeNode<Neg>(lazy_self->GetIrValue(), std::move(shapes));
3992 CacheNode(node);
3993 }
3994
3995 auto result = torch::lazy::CreateAtenFromLtcTensor(
3996 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
3997 return result;
3998 }
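    // End-to-end sketch (assumes the TorchScript lazy backend has been initialised, e.g. via
    // torch::lazy::InitTorchScriptBackend() in C++ or torch._lazy.ts_backend.init() in Python;
    // those entry points are assumptions of this example, not part of this file): nothing in the
    // functions above runs a kernel, they only append IR nodes.
    //
    //   auto x = at::randn({2, 3}, at::TensorOptions().device(at::kLazy));
    //   auto y = at::neg(x);          // records a Neg node with the inferred shape
    //   auto y_cpu = y.to(at::kCPU);  // materialisation compiles and executes the graph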
3999
4000
4001 at::Tensor LazyNativeFunctions::nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
4002
4003 if (force_eager_fallback(at::aten::nll_loss2d_backward)) {
4004 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nll_loss2d_backward)>::call(
4005 grad_output,
4006 self,
4007 target,
4008 weight,
4009 reduction,
4010 ignore_index,
4011 total_weight
4012 );
4013 }
4014
4015 TORCH_LAZY_FN_COUNTER("lazy::");
4016 auto common_device = torch::lazy::GetBackendDevice(grad_output, self, target, weight, total_weight);
4017 TORCH_INTERNAL_ASSERT(common_device);
4018
4019 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
4020 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4021 LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
4022 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
4023 LazyTensorPtr lazy_total_weight = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(total_weight, *common_device);
4024 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NllLoss2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, ignore_index, lazy_total_weight->GetIrValue());
4025 if (!node) {
4026
4027 auto shapes = torch::lazy::compute_shape_nll_loss2d_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
4028 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4029 if(torch::lazy::symbolicShapeEnabled()){
4030 std::vector<torch::jit::IValue> inputs = { grad_output, self, target, weight, reduction, ignore_index, total_weight };
4031 const char* schema_str = "aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor";
4032 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4033 }
4034
4035 node = torch::lazy::MakeNode<NllLoss2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, ignore_index, lazy_total_weight->GetIrValue(), std::move(shapes));
4036 CacheNode(node);
4037 }
4038
4039 auto result = torch::lazy::CreateAtenFromLtcTensor(
4040 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4041 return result;
4042 }
4043
4044
4045 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
4046
4047 if (force_eager_fallback(at::aten::nll_loss2d_forward)) {
4048 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nll_loss2d_forward)>::call(
4049 self,
4050 target,
4051 weight,
4052 reduction,
4053 ignore_index
4054 );
4055 }
4056
4057 TORCH_LAZY_FN_COUNTER("lazy::");
4058 auto common_device = torch::lazy::GetBackendDevice(self, target, weight);
4059 TORCH_INTERNAL_ASSERT(common_device);
4060
4061 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4062 LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
4063 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
4064 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NllLoss2dForward>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, ignore_index);
4065 if (!node) {
4066
4067 auto shapes = torch::lazy::compute_shape_nll_loss2d_forward(self, target, weight, reduction, ignore_index);
4068 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
4069 if(torch::lazy::symbolicShapeEnabled()){
4070 std::vector<torch::jit::IValue> inputs = { self, target, weight, reduction, ignore_index };
4071 const char* schema_str = "aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)";
4072 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4073 }
4074
4075 node = torch::lazy::MakeNode<NllLoss2dForward>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, ignore_index, std::move(shapes));
4076 CacheNode(node);
4077 }
4078
4079 std::vector<LazyTensorPtr> lazy_tensors;
4080 for (int i = 0; i < 2; i++) {
4081 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
4082 }
4083 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
4084 return result;
4085 }
4086
4087
4088 at::Tensor LazyNativeFunctions::nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
4089
4090 if (force_eager_fallback(at::aten::nll_loss_backward)) {
4091 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nll_loss_backward)>::call(
4092 grad_output,
4093 self,
4094 target,
4095 weight,
4096 reduction,
4097 ignore_index,
4098 total_weight
4099 );
4100 }
4101
4102 TORCH_LAZY_FN_COUNTER("lazy::");
4103 auto common_device = torch::lazy::GetBackendDevice(grad_output, self, target, weight, total_weight);
4104 TORCH_INTERNAL_ASSERT(common_device);
4105
4106 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
4107 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4108 LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
4109 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
4110 LazyTensorPtr lazy_total_weight = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(total_weight, *common_device);
4111 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NllLossBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, ignore_index, lazy_total_weight->GetIrValue());
4112 if (!node) {
4113 auto grad_output_meta = to_meta(grad_output);
4114 auto self_meta = to_meta(self);
4115 auto target_meta = to_meta(target);
4116 auto weight_meta = to_meta(weight);
4117 auto total_weight_meta = to_meta(total_weight);
4118 auto out_meta = at::meta::nll_loss_backward(grad_output_meta, self_meta, target_meta, weight_meta, reduction, ignore_index, total_weight_meta);
4119
4120            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4121 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4122 if(torch::lazy::symbolicShapeEnabled()){
4123 std::vector<torch::jit::IValue> inputs = { grad_output, self, target, weight, reduction, ignore_index, total_weight };
4124 const char* schema_str = "aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor";
4125 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4126 }
4127
4128 node = torch::lazy::MakeNode<NllLossBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, ignore_index, lazy_total_weight->GetIrValue(), std::move(shapes));
4129 CacheNode(node);
4130 }
4131
4132 auto result = torch::lazy::CreateAtenFromLtcTensor(
4133 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4134 return result;
4135 }
4136
4137
4138 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
4139
4140 if (force_eager_fallback(at::aten::nll_loss_forward)) {
4141 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nll_loss_forward)>::call(
4142 self,
4143 target,
4144 weight,
4145 reduction,
4146 ignore_index
4147 );
4148 }
4149
4150 TORCH_LAZY_FN_COUNTER("lazy::");
4151 auto common_device = torch::lazy::GetBackendDevice(self, target, weight);
4152 TORCH_INTERNAL_ASSERT(common_device);
4153
4154 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4155 LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
4156 LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
4157 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NllLossForward>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, ignore_index);
4158 if (!node) {
4159 auto self_meta = to_meta(self);
4160 auto target_meta = to_meta(target);
4161 auto weight_meta = to_meta(weight);
4162 auto out_meta = at::meta::nll_loss_forward(self_meta, target_meta, weight_meta, reduction, ignore_index);
4163 std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(std::get<0>(out_meta).scalar_type(), std::get<0>(out_meta).sizes().vec()),torch::lazy::Shape(std::get<1>(out_meta).scalar_type(), std::get<1>(out_meta).sizes().vec())};
4164 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
4165 if(torch::lazy::symbolicShapeEnabled()){
4166 std::vector<torch::jit::IValue> inputs = { self, target, weight, reduction, ignore_index };
4167 const char* schema_str = "aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)";
4168 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4169 }
4170
4171 node = torch::lazy::MakeNode<NllLossForward>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? c10::make_optional(lazy_weight->GetIrValue()) : c10::nullopt, reduction, ignore_index, std::move(shapes));
4172 CacheNode(node);
4173 }
4174
4175 std::vector<LazyTensorPtr> lazy_tensors;
4176 for (int i = 0; i < 2; i++) {
4177 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
4178 }
4179 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
4180 return result;
4181 }
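    // Metadata sketch (assuming at::meta::nll_loss_forward mirrors the eager kernels): one
    // torch::lazy::Shape is recorded per tuple element, so `output` and `total_weight` carry
    // independent dtype/size information. For a float input of shape {N, C}:
    //
    //   // reduction == at::Reduction::None -> output: {N}, total_weight: {} (0-dim)
    //   // reduction == at::Reduction::Mean -> output: {},  total_weight: {} (0-dim)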
4182
4183
4184 at::Tensor LazyNativeFunctions::nonzero(const at::Tensor & self) {
4185
4186 if (force_eager_fallback(at::aten::nonzero)) {
4187 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nonzero)>::call(
4188 self
4189 );
4190 }
4191
4192 TORCH_LAZY_FN_COUNTER("lazy::");
4193 auto common_device = torch::lazy::GetBackendDevice(self);
4194 TORCH_INTERNAL_ASSERT(common_device);
4195
4196 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4197 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Nonzero>(lazy_self->GetIrValue());
4198 if (!node) {
4199
4200 auto shapes = torch::lazy::compute_shape_nonzero(self);
4201 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4202 if(torch::lazy::symbolicShapeEnabled()){
4203 std::vector<torch::jit::IValue> inputs = { self };
4204 const char* schema_str = "aten::nonzero(Tensor self) -> Tensor";
4205 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4206 }
4207
4208 node = torch::lazy::MakeNode<Nonzero>(lazy_self->GetIrValue(), std::move(shapes));
4209 CacheNode(node);
4210 }
4211
4212 auto result = torch::lazy::CreateAtenFromLtcTensor(
4213 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4214 return result;
4215 }
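    // Note (an assumption about compute_shape_nonzero): nonzero() has a data-dependent output
    // size, so no trace-time shape can be exact; the hand-written shape function has to report a
    // conservative static shape instead, on the order of
    //
    //   torch::lazy::Shape(at::kLong, {self.numel(), self.dim()});
    //
    // with the true row count only known once the graph actually runs.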
4216
4217
4218 at::Tensor LazyNativeFunctions::norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
4219
4220 if (force_eager_fallback(at::aten::norm)) {
4221 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(norm, ScalarOpt_dim)>::call(
4222 self,
4223 p,
4224 dim,
4225 keepdim
4226 );
4227 }
4228
4229 TORCH_LAZY_FN_COUNTER("lazy::");
4230 auto common_device = torch::lazy::GetBackendDevice(self);
4231 TORCH_INTERNAL_ASSERT(common_device);
4232
4233 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4234 auto node_p = p ?
4235 c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
4236 GetIrValueForScalarFromCodegen(*p, *common_device)):
4237 c10::nullopt;
4238 torch::lazy::NodePtr node = torch::lazy::ReuseNode<NormScalaroptDim>(lazy_self->GetIrValue(), node_p, std::vector<int64_t>(dim.begin(), dim.end()), keepdim);
4239 if (!node) {
4240 auto self_meta = to_meta(self);
4241 auto out_meta = at::meta::norm(self_meta, p, dim, keepdim);
4242
4243            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4244 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4245 if(torch::lazy::symbolicShapeEnabled()){
4246 std::vector<torch::jit::IValue> inputs = { self, p, dim, keepdim };
4247 const char* schema_str = "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor";
4248 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4249 }
4250
4251 node = torch::lazy::MakeNode<NormScalaroptDim>(lazy_self->GetIrValue(), node_p, std::vector<int64_t>(dim.begin(), dim.end()), keepdim, std::move(shapes));
4252 CacheNode(node);
4253 }
4254
4255 auto result = torch::lazy::CreateAtenFromLtcTensor(
4256 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4257 return result;
4258 }
4259
4260
4261 at::Tensor LazyNativeFunctions::permute_copy(const at::Tensor & self, at::IntArrayRef dims) {
4262
4263 if (force_eager_fallback(at::aten::permute_copy)) {
4264 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(permute_copy)>::call(
4265 self,
4266 dims
4267 );
4268 }
4269
4270 TORCH_LAZY_FN_COUNTER("lazy::");
4271 auto common_device = torch::lazy::GetBackendDevice(self);
4272 TORCH_INTERNAL_ASSERT(common_device);
4273
4274 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4275 torch::lazy::NodePtr node = torch::lazy::ReuseNode<PermuteCopy>(lazy_self->GetIrValue(), std::vector<int64_t>(dims.begin(), dims.end()));
4276 if (!node) {
4277 auto self_meta = to_meta(self);
4278 auto out_meta = at::compositeexplicitautogradnonfunctional::permute_copy(self_meta, dims);
4279
4280            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4281 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4282 if(torch::lazy::symbolicShapeEnabled()){
4283 std::vector<torch::jit::IValue> inputs = { self, dims };
4284 const char* schema_str = "aten::permute_copy(Tensor self, int[] dims) -> Tensor";
4285 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4286 }
4287
4288 node = torch::lazy::MakeNode<PermuteCopy>(lazy_self->GetIrValue(), std::vector<int64_t>(dims.begin(), dims.end()), std::move(shapes));
4289 CacheNode(node);
4290 }
4291
4292 auto result = torch::lazy::CreateAtenFromLtcTensor(
4293 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4294 return result;
4295 }
4296
4297
4298 at::Tensor LazyNativeFunctions::pow(const at::Tensor & self, const at::Tensor & exponent) {
4299
4300 if (force_eager_fallback(at::aten::pow)) {
4301 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(pow, Tensor_Tensor)>::call(
4302 self,
4303 exponent
4304 );
4305 }
4306
4307 TORCH_LAZY_FN_COUNTER("lazy::");
4308 auto common_device = torch::lazy::GetBackendDevice(self, exponent);
4309 TORCH_INTERNAL_ASSERT(common_device);
4310
4311 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4312 LazyTensorPtr lazy_exponent = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(exponent, *common_device);
4313 torch::lazy::NodePtr node = torch::lazy::ReuseNode<PowTensorTensor>(lazy_self->GetIrValue(), lazy_exponent->GetIrValue());
4314 if (!node) {
4315 auto self_meta = to_meta(self);
4316 auto exponent_meta = to_meta(exponent);
4317 auto out_meta = at::meta::pow(self_meta, exponent_meta);
4318
4319            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4320 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4321 if(torch::lazy::symbolicShapeEnabled()){
4322 std::vector<torch::jit::IValue> inputs = { self, exponent };
4323 const char* schema_str = "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor";
4324 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4325 }
4326
4327 node = torch::lazy::MakeNode<PowTensorTensor>(lazy_self->GetIrValue(), lazy_exponent->GetIrValue(), std::move(shapes));
4328 CacheNode(node);
4329 }
4330
4331 auto result = torch::lazy::CreateAtenFromLtcTensor(
4332 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4333 return result;
4334 }
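    // Broadcasting/promotion sketch (assumes at::meta::pow applies the standard type-promotion
    // and broadcasting rules, which is all the lazy trace needs to record):
    //
    //   // self: float32 {4, 1}, exponent: int64 {1, 5}
    //   // out_meta -> dtype float32, sizes {4, 5}; no arithmetic is performed on meta tensors.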
4335
4336
4337 at::Tensor LazyNativeFunctions::pow(const at::Tensor & self, const at::Scalar & exponent) {
4338
4339 if (force_eager_fallback(at::aten::pow)) {
4340 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(pow, Tensor_Scalar)>::call(
4341 self,
4342 exponent
4343 );
4344 }
4345
4346 TORCH_LAZY_FN_COUNTER("lazy::");
4347 auto common_device = torch::lazy::GetBackendDevice(self);
4348 TORCH_INTERNAL_ASSERT(common_device);
4349
4350 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4351 auto node_exponent = torch::lazy::LazyGraphExecutor::Get()->
4352 GetIrValueForScalarFromCodegen(exponent, *common_device);
4353 torch::lazy::NodePtr node = torch::lazy::ReuseNode<PowTensorScalar>(lazy_self->GetIrValue(), node_exponent);
4354 if (!node) {
4355 auto self_meta = to_meta(self);
4356 auto out_meta = at::meta::pow(self_meta, exponent);
4357
4358            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4359 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4360 if(torch::lazy::symbolicShapeEnabled()){
4361 std::vector<torch::jit::IValue> inputs = { self, exponent };
4362 const char* schema_str = "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor";
4363 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4364 }
4365
4366 node = torch::lazy::MakeNode<PowTensorScalar>(lazy_self->GetIrValue(), node_exponent, std::move(shapes));
4367 CacheNode(node);
4368 }
4369
4370 auto result = torch::lazy::CreateAtenFromLtcTensor(
4371 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4372 return result;
4373 }
4374
4375
4376 at::Tensor LazyNativeFunctions::random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
4377
4378 if (force_eager_fallback(at::aten::random) || (generator.has_value() && generator->defined())) {
4379 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(random, from)>::call(
4380 self,
4381 from,
4382 to,
4383 generator
4384 );
4385 }
4386
4387 TORCH_LAZY_FN_COUNTER("lazy::");
4388 auto common_device = torch::lazy::GetBackendDevice(self);
4389 TORCH_INTERNAL_ASSERT(common_device);
4390
4391 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4392 torch::lazy::NodePtr node = torch::lazy::ReuseNode<RandomFrom>(lazy_self->GetIrValue(), from, to);
4393 if (!node) {
4394
4395 auto shapes = torch::lazy::compute_shape_random(self, from, to, generator);
4396 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4397 if(torch::lazy::symbolicShapeEnabled()){
4398 std::vector<torch::jit::IValue> inputs = { self, from, to };
4399 const char* schema_str = "aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor";
4400 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4401 }
4402
4403 node = torch::lazy::MakeNode<RandomFrom>(lazy_self->GetIrValue(), from, to, std::move(shapes));
4404 CacheNode(node);
4405 }
4406
4407 auto result = torch::lazy::CreateAtenFromLtcTensor(
4408 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4409 return result;
4410 }
4411
4412
4413 at::Tensor LazyNativeFunctions::random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
4414
4415 if (force_eager_fallback(at::aten::random) || (generator.has_value() && generator->defined())) {
4416 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(random, to)>::call(
4417 self,
4418 to,
4419 generator
4420 );
4421 }
4422
4423 TORCH_LAZY_FN_COUNTER("lazy::");
4424 auto common_device = torch::lazy::GetBackendDevice(self);
4425 TORCH_INTERNAL_ASSERT(common_device);
4426
4427 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4428 torch::lazy::NodePtr node = torch::lazy::ReuseNode<RandomTo>(lazy_self->GetIrValue(), to);
4429 if (!node) {
4430
4431 auto shapes = torch::lazy::compute_shape_random(self, to, generator);
4432 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4433 if(torch::lazy::symbolicShapeEnabled()){
4434 std::vector<torch::jit::IValue> inputs = { self, to };
4435 const char* schema_str = "aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor";
4436 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4437 }
4438
4439 node = torch::lazy::MakeNode<RandomTo>(lazy_self->GetIrValue(), to, std::move(shapes));
4440 CacheNode(node);
4441 }
4442
4443 auto result = torch::lazy::CreateAtenFromLtcTensor(
4444 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4445 return result;
4446 }
4447
4448
4449 at::Tensor LazyNativeFunctions::random(const at::Tensor & self, c10::optional<at::Generator> generator) {
4450
4451 if (force_eager_fallback(at::aten::random) || (generator.has_value() && generator->defined())) {
4452 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(random)>::call(
4453 self,
4454 generator
4455 );
4456 }
4457
4458 TORCH_LAZY_FN_COUNTER("lazy::");
4459 auto common_device = torch::lazy::GetBackendDevice(self);
4460 TORCH_INTERNAL_ASSERT(common_device);
4461
4462 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4463 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Random>(lazy_self->GetIrValue());
4464 if (!node) {
4465
4466 auto shapes = torch::lazy::compute_shape_random(self, generator);
4467 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4468 if(torch::lazy::symbolicShapeEnabled()){
4469 std::vector<torch::jit::IValue> inputs = { self };
4470 const char* schema_str = "aten::random(Tensor self, *, Generator? generator=None) -> Tensor";
4471 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4472 }
4473
4474 node = torch::lazy::MakeNode<Random>(lazy_self->GetIrValue(), std::move(shapes));
4475 CacheNode(node);
4476 }
4477
4478 auto result = torch::lazy::CreateAtenFromLtcTensor(
4479 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4480 return result;
4481 }
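    // Sketch of the generator guard shared by the three random.* overloads above (the "why" is an
    // assumption): a user-supplied at::Generator carries eager RNG state that the traced graph
    // cannot honour, so such calls take the eager fallback, while generator-less calls stay lazy:
    //
    //   x.random_(0, 10);                                          // traced as a RandomFrom node
    //   x.random_(0, 10, at::detail::getDefaultCPUGenerator());    // routed to ltc_eager_fallback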
4482
4483
4484 at::Tensor LazyNativeFunctions::reciprocal(const at::Tensor & self) {
4485
4486 if (force_eager_fallback(at::aten::reciprocal)) {
4487 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(reciprocal)>::call(
4488 self
4489 );
4490 }
4491
4492 TORCH_LAZY_FN_COUNTER("lazy::");
4493 auto common_device = torch::lazy::GetBackendDevice(self);
4494 TORCH_INTERNAL_ASSERT(common_device);
4495
4496 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4497 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Reciprocal>(lazy_self->GetIrValue());
4498 if (!node) {
4499 auto self_meta = to_meta(self);
4500 auto out_meta = at::meta::reciprocal(self_meta);
4501
4502            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4503 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4504 if(torch::lazy::symbolicShapeEnabled()){
4505 std::vector<torch::jit::IValue> inputs = { self };
4506 const char* schema_str = "aten::reciprocal(Tensor self) -> Tensor";
4507 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4508 }
4509
4510 node = torch::lazy::MakeNode<Reciprocal>(lazy_self->GetIrValue(), std::move(shapes));
4511 CacheNode(node);
4512 }
4513
4514 auto result = torch::lazy::CreateAtenFromLtcTensor(
4515 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4516 return result;
4517 }
4518
4519
4520 at::Tensor LazyNativeFunctions::relu(const at::Tensor & self) {
4521
4522 if (force_eager_fallback(at::aten::relu)) {
4523 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(relu)>::call(
4524 self
4525 );
4526 }
4527
4528 TORCH_LAZY_FN_COUNTER("lazy::");
4529 auto common_device = torch::lazy::GetBackendDevice(self);
4530 TORCH_INTERNAL_ASSERT(common_device);
4531
4532 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4533 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Relu>(lazy_self->GetIrValue());
4534 if (!node) {
4535
4536 auto shapes = torch::lazy::compute_shape_relu(self);
4537 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4538 if(torch::lazy::symbolicShapeEnabled()){
4539 std::vector<torch::jit::IValue> inputs = { self };
4540 const char* schema_str = "aten::relu(Tensor self) -> Tensor";
4541 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4542 }
4543
4544 node = torch::lazy::MakeNode<Relu>(lazy_self->GetIrValue(), std::move(shapes));
4545 CacheNode(node);
4546 }
4547
4548 auto result = torch::lazy::CreateAtenFromLtcTensor(
4549 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4550 return result;
4551 }
4552
4553
4554 at::Tensor LazyNativeFunctions::remainder(const at::Tensor & self, const at::Tensor & other) {
4555
4556 if (force_eager_fallback(at::aten::remainder)) {
4557 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(remainder, Tensor)>::call(
4558 self,
4559 other
4560 );
4561 }
4562
4563 TORCH_LAZY_FN_COUNTER("lazy::");
4564 auto common_device = torch::lazy::GetBackendDevice(self, other);
4565 TORCH_INTERNAL_ASSERT(common_device);
4566
4567 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4568 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
4569 torch::lazy::NodePtr node = torch::lazy::ReuseNode<RemainderTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
4570 if (!node) {
4571 auto self_meta = to_meta(self);
4572 auto other_meta = to_meta(other);
4573 auto out_meta = at::meta::remainder(self_meta, other_meta);
4574
4575            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4576 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4577 if(torch::lazy::symbolicShapeEnabled()){
4578 std::vector<torch::jit::IValue> inputs = { self, other };
4579 const char* schema_str = "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor";
4580 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4581 }
4582
4583 node = torch::lazy::MakeNode<RemainderTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
4584 CacheNode(node);
4585 }
4586
4587 auto result = torch::lazy::CreateAtenFromLtcTensor(
4588 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4589 return result;
4590 }
4591
4592
4593 at::Tensor LazyNativeFunctions::repeat(const at::Tensor & self, at::IntArrayRef repeats) {
4594
4595 if (force_eager_fallback(at::aten::repeat)) {
4596 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(repeat)>::call(
4597 self,
4598 c10::fromIntArrayRefSlow(repeats)
4599 );
4600 }
4601
4602 TORCH_LAZY_FN_COUNTER("lazy::");
4603 auto common_device = torch::lazy::GetBackendDevice(self);
4604 TORCH_INTERNAL_ASSERT(common_device);
4605
4606 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4607 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Repeat>(lazy_self->GetIrValue(), std::vector<int64_t>(repeats.begin(), repeats.end()));
4608 if (!node) {
4609
4610 auto shapes = torch::lazy::compute_shape_repeat(self, repeats);
4611 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4612 if(torch::lazy::symbolicShapeEnabled()){
4613 std::vector<torch::jit::IValue> inputs = { self, repeats };
4614 const char* schema_str = "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor";
4615 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4616 }
4617
4618 node = torch::lazy::MakeNode<Repeat>(lazy_self->GetIrValue(), std::vector<int64_t>(repeats.begin(), repeats.end()), std::move(shapes));
4619 CacheNode(node);
4620 }
4621
4622 auto result = torch::lazy::CreateAtenFromLtcTensor(
4623 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4624 return result;
4625 }
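    // Shape-rule sketch (from aten semantics, assumed to match compute_shape_repeat): repeat()
    // multiplies each dimension by the corresponding entry in `repeats`, prepending new dimensions
    // when `repeats` is longer than self.sizes(), e.g.
    //
    //   // self: {2, 3}, repeats: {4, 1, 2} -> output shape {4, 1*2, 2*3} == {4, 2, 6}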
4626
4627
4628 at::Tensor LazyNativeFunctions::rsqrt(const at::Tensor & self) {
4629
4630 if (force_eager_fallback(at::aten::rsqrt)) {
4631 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(rsqrt)>::call(
4632 self
4633 );
4634 }
4635
4636 TORCH_LAZY_FN_COUNTER("lazy::");
4637 auto common_device = torch::lazy::GetBackendDevice(self);
4638 TORCH_INTERNAL_ASSERT(common_device);
4639
4640 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4641 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Rsqrt>(lazy_self->GetIrValue());
4642 if (!node) {
4643 auto self_meta = to_meta(self);
4644 auto out_meta = at::meta::rsqrt(self_meta);
4645
4646            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4647 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4648 if(torch::lazy::symbolicShapeEnabled()){
4649 std::vector<torch::jit::IValue> inputs = { self };
4650 const char* schema_str = "aten::rsqrt(Tensor self) -> Tensor";
4651 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4652 }
4653
4654 node = torch::lazy::MakeNode<Rsqrt>(lazy_self->GetIrValue(), std::move(shapes));
4655 CacheNode(node);
4656 }
4657
4658 auto result = torch::lazy::CreateAtenFromLtcTensor(
4659 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4660 return result;
4661 }
4662
4663
4664 at::Tensor LazyNativeFunctions::scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
4665
4666 if (force_eager_fallback(at::aten::scatter_add)) {
4667 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(scatter_add)>::call(
4668 self,
4669 dim,
4670 index,
4671 src
4672 );
4673 }
4674
4675 TORCH_LAZY_FN_COUNTER("lazy::");
4676 auto common_device = torch::lazy::GetBackendDevice(self, index, src);
4677 TORCH_INTERNAL_ASSERT(common_device);
4678
4679 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4680 LazyTensorPtr lazy_index = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(index, *common_device);
4681 LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *common_device);
4682 torch::lazy::NodePtr node = torch::lazy::ReuseNode<ScatterAdd>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), lazy_src->GetIrValue());
4683 if (!node) {
4684 auto self_meta = to_meta(self);
4685 auto index_meta = to_meta(index);
4686 auto src_meta = to_meta(src);
4687 auto out_meta = at::meta::scatter_add(self_meta, dim, index_meta, src_meta);
4688
4689            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4690 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4691 if(torch::lazy::symbolicShapeEnabled()){
4692 std::vector<torch::jit::IValue> inputs = { self, dim, index, src };
4693 const char* schema_str = "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor";
4694 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4695 }
4696
4697 node = torch::lazy::MakeNode<ScatterAdd>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), lazy_src->GetIrValue(), std::move(shapes));
4698 CacheNode(node);
4699 }
4700
4701 auto result = torch::lazy::CreateAtenFromLtcTensor(
4702 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4703 return result;
4704 }
4705
4706
4707 at::Tensor LazyNativeFunctions::select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
4708
4709 if (force_eager_fallback(at::aten::select_copy)) {
4710 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(select_copy, int)>::call(
4711 self,
4712 dim,
4713 index
4714 );
4715 }
4716
4717 TORCH_LAZY_FN_COUNTER("lazy::");
4718 auto common_device = torch::lazy::GetBackendDevice(self);
4719 TORCH_INTERNAL_ASSERT(common_device);
4720
4721 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4722 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SelectCopyInt>(lazy_self->GetIrValue(), dim, index);
4723 if (!node) {
4724 auto self_meta = to_meta(self);
4725 auto out_meta = at::compositeexplicitautogradnonfunctional::select_copy(self_meta, dim, index);
4726
4727            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4728 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4729 if(torch::lazy::symbolicShapeEnabled()){
4730 std::vector<torch::jit::IValue> inputs = { self, dim, index };
4731 const char* schema_str = "aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor";
4732 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4733 }
4734
4735 node = torch::lazy::MakeNode<SelectCopyInt>(lazy_self->GetIrValue(), dim, index, std::move(shapes));
4736 CacheNode(node);
4737 }
4738
4739 auto result = torch::lazy::CreateAtenFromLtcTensor(
4740 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4741 return result;
4742 }
4743
4744
4745 at::Tensor LazyNativeFunctions::select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
4746
4747 if (force_eager_fallback(at::aten::select_scatter)) {
4748 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(select_scatter)>::call(
4749 self,
4750 src,
4751 dim,
4752 index
4753 );
4754 }
4755
4756 TORCH_LAZY_FN_COUNTER("lazy::");
4757 auto common_device = torch::lazy::GetBackendDevice(self, src);
4758 TORCH_INTERNAL_ASSERT(common_device);
4759
4760 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4761 LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *common_device);
4762 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SelectScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), dim, index);
4763 if (!node) {
4764
4765 auto shapes = torch::lazy::compute_shape_select_scatter(self, src, dim, index);
4766 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4767 if(torch::lazy::symbolicShapeEnabled()){
4768 std::vector<torch::jit::IValue> inputs = { self, src, dim, index };
4769 const char* schema_str = "aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor";
4770 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4771 }
4772
4773 node = torch::lazy::MakeNode<SelectScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), dim, index, std::move(shapes));
4774 CacheNode(node);
4775 }
4776
4777 auto result = torch::lazy::CreateAtenFromLtcTensor(
4778 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4779 return result;
4780 }
4781
4782
4783 at::Tensor LazyNativeFunctions::sgn(const at::Tensor & self) {
4784
4785 if (force_eager_fallback(at::aten::sgn)) {
4786 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sgn)>::call(
4787 self
4788 );
4789 }
4790
4791 TORCH_LAZY_FN_COUNTER("lazy::");
4792 auto common_device = torch::lazy::GetBackendDevice(self);
4793 TORCH_INTERNAL_ASSERT(common_device);
4794
4795 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4796 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Sgn>(lazy_self->GetIrValue());
4797 if (!node) {
4798 auto self_meta = to_meta(self);
4799 auto out_meta = at::meta::sgn(self_meta);
4800
4801            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4802 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4803 if(torch::lazy::symbolicShapeEnabled()){
4804 std::vector<torch::jit::IValue> inputs = { self };
4805 const char* schema_str = "aten::sgn(Tensor self) -> Tensor";
4806 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4807 }
4808
4809 node = torch::lazy::MakeNode<Sgn>(lazy_self->GetIrValue(), std::move(shapes));
4810 CacheNode(node);
4811 }
4812
4813 auto result = torch::lazy::CreateAtenFromLtcTensor(
4814 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4815 return result;
4816 }
4817
4818
4819 at::Tensor LazyNativeFunctions::sigmoid(const at::Tensor & self) {
4820
4821 if (force_eager_fallback(at::aten::sigmoid)) {
4822 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sigmoid)>::call(
4823 self
4824 );
4825 }
4826
4827 TORCH_LAZY_FN_COUNTER("lazy::");
4828 auto common_device = torch::lazy::GetBackendDevice(self);
4829 TORCH_INTERNAL_ASSERT(common_device);
4830
4831 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4832 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Sigmoid>(lazy_self->GetIrValue());
4833 if (!node) {
4834 auto self_meta = to_meta(self);
4835 auto out_meta = at::meta::sigmoid(self_meta);
4836
4837            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4838 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4839 if(torch::lazy::symbolicShapeEnabled()){
4840 std::vector<torch::jit::IValue> inputs = { self };
4841 const char* schema_str = "aten::sigmoid(Tensor self) -> Tensor";
4842 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4843 }
4844
4845 node = torch::lazy::MakeNode<Sigmoid>(lazy_self->GetIrValue(), std::move(shapes));
4846 CacheNode(node);
4847 }
4848
4849 auto result = torch::lazy::CreateAtenFromLtcTensor(
4850 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4851 return result;
4852 }
4853
4854
4855 at::Tensor LazyNativeFunctions::sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) {
4856
4857 if (force_eager_fallback(c10::Symbol::fromQualString("aten::sigmoid_backward"))) {
4858 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sigmoid_backward)>::call(
4859 grad_output,
4860 output
4861 );
4862 }
4863
4864 TORCH_LAZY_FN_COUNTER("lazy::");
4865 auto common_device = torch::lazy::GetBackendDevice(grad_output, output);
4866 TORCH_INTERNAL_ASSERT(common_device);
4867
4868 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
4869 LazyTensorPtr lazy_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(output, *common_device);
4870 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SigmoidBackward>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue());
4871 if (!node) {
4872 auto grad_output_meta = to_meta(grad_output);
4873 auto output_meta = to_meta(output);
4874 auto out_meta = at::meta::sigmoid_backward(grad_output_meta, output_meta);
4875
4876            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4877 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4878 if(torch::lazy::symbolicShapeEnabled()){
4879 std::vector<torch::jit::IValue> inputs = { grad_output, output };
4880 const char* schema_str = "aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor";
4881 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4882 }
4883
4884 node = torch::lazy::MakeNode<SigmoidBackward>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), std::move(shapes));
4885 CacheNode(node);
4886 }
4887
4888 auto result = torch::lazy::CreateAtenFromLtcTensor(
4889 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4890 return result;
4891 }
4892
4893
4894 at::Tensor LazyNativeFunctions::silu(const at::Tensor & self) {
4895
4896 if (force_eager_fallback(at::aten::silu)) {
4897 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(silu)>::call(
4898 self
4899 );
4900 }
4901
4902 TORCH_LAZY_FN_COUNTER("lazy::");
4903 auto common_device = torch::lazy::GetBackendDevice(self);
4904 TORCH_INTERNAL_ASSERT(common_device);
4905
4906 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4907 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Silu>(lazy_self->GetIrValue());
4908 if (!node) {
4909 auto self_meta = to_meta(self);
4910 auto out_meta = at::meta::silu(self_meta);
4911
4912            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4913 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4914 if(torch::lazy::symbolicShapeEnabled()){
4915 std::vector<torch::jit::IValue> inputs = { self };
4916 const char* schema_str = "aten::silu(Tensor self) -> Tensor";
4917 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4918 }
4919
4920 node = torch::lazy::MakeNode<Silu>(lazy_self->GetIrValue(), std::move(shapes));
4921 CacheNode(node);
4922 }
4923
4924 auto result = torch::lazy::CreateAtenFromLtcTensor(
4925 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4926 return result;
4927 }
4928
4929
4930 at::Tensor LazyNativeFunctions::slice_copy_symint(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
4931
4932 if (force_eager_fallback(at::aten::slice_copy)) {
4933 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(slice_copy, Tensor)>::call(
4934 self,
4935 dim,
4936 start,
4937 end,
4938 step
4939 );
4940 }
4941
4942 TORCH_LAZY_FN_COUNTER("lazy::");
4943 auto common_device = torch::lazy::GetBackendDevice(self, start, end, step);
4944 TORCH_INTERNAL_ASSERT(common_device);
4945
4946 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4947 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SliceCopyTensor>(lazy_self->GetIrValue(), dim, start ? c10::make_optional(GetSymIntValue(*start)) : c10::nullopt, end ? c10::make_optional(GetSymIntValue(*end)) : c10::nullopt, GetSymIntValue(step));
4948 if (!node) {
4949 auto self_meta = to_meta(self);
4950 auto out_meta = at::compositeexplicitautogradnonfunctional::slice_copy_symint(self_meta, dim, start, end, step);
4951
4952          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
4953 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4954 if(torch::lazy::symbolicShapeEnabled()){
4955 std::vector<torch::jit::IValue> inputs = { self, dim, start, end, step };
4956 const char* schema_str = "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor";
4957 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4958 }
4959
4960 node = torch::lazy::MakeNode<SliceCopyTensor>(lazy_self->GetIrValue(), dim, start ? c10::make_optional(GetSymIntValue(*start)) : c10::nullopt, end ? c10::make_optional(GetSymIntValue(*end)) : c10::nullopt, GetSymIntValue(step), std::move(shapes));
4961 CacheNode(node);
4962 }
4963
4964 auto result = torch::lazy::CreateAtenFromLtcTensor(
4965 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
4966 return result;
4967 }
4968
4969
4970 at::Tensor LazyNativeFunctions::slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
4971
4972 if (force_eager_fallback(at::aten::slice_scatter)) {
4973 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(slice_scatter)>::call(
4974 self,
4975 src,
4976 dim,
4977 start,
4978 end,
4979 step
4980 );
4981 }
4982
4983 TORCH_LAZY_FN_COUNTER("lazy::");
4984 auto common_device = torch::lazy::GetBackendDevice(self, src, start, end, step);
4985 TORCH_INTERNAL_ASSERT(common_device);
4986
4987 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
4988 LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *common_device);
4989 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SliceScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), dim, start ? c10::make_optional(GetSymIntValue(*start)) : c10::nullopt, end ? c10::make_optional(GetSymIntValue(*end)) : c10::nullopt, GetSymIntValue(step));
4990 if (!node) {
4991
4992 auto shapes = torch::lazy::compute_shape_slice_scatter_symint(self, src, dim, start, end, step);
4993 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
4994 if(torch::lazy::symbolicShapeEnabled()){
4995 std::vector<torch::jit::IValue> inputs = { self, src, dim, start, end, step };
4996 const char* schema_str = "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor";
4997 applySymbolicShapesOnLT(schema_str, inputs, shapes);
4998 }
4999
5000 node = torch::lazy::MakeNode<SliceScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), dim, start ? c10::make_optional(GetSymIntValue(*start)) : c10::nullopt, end ? c10::make_optional(GetSymIntValue(*end)) : c10::nullopt, GetSymIntValue(step), std::move(shapes));
5001 CacheNode(node);
5002 }
5003
5004 auto result = torch::lazy::CreateAtenFromLtcTensor(
5005 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5006 return result;
5007 }
5008
5009
5010 at::Tensor LazyNativeFunctions::smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
5011
5012 if (force_eager_fallback(at::aten::smooth_l1_loss)) {
5013 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(smooth_l1_loss)>::call(
5014 self,
5015 target,
5016 reduction,
5017 beta
5018 );
5019 }
5020
5021 TORCH_LAZY_FN_COUNTER("lazy::");
5022 auto common_device = torch::lazy::GetBackendDevice(self, target);
5023 TORCH_INTERNAL_ASSERT(common_device);
5024
5025 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5026 LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
5027 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SmoothL1Loss>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), reduction, beta);
5028 if (!node) {
5029 auto self_meta = to_meta(self);
5030 auto target_meta = to_meta(target);
5031 auto out_meta = at::meta::smooth_l1_loss(self_meta, target_meta, reduction, beta);
5032
5033          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5034 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5035 if(torch::lazy::symbolicShapeEnabled()){
5036 std::vector<torch::jit::IValue> inputs = { self, target, reduction, beta };
5037 const char* schema_str = "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor";
5038 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5039 }
5040
5041 node = torch::lazy::MakeNode<SmoothL1Loss>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), reduction, beta, std::move(shapes));
5042 CacheNode(node);
5043 }
5044
5045 auto result = torch::lazy::CreateAtenFromLtcTensor(
5046 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5047 return result;
5048 }
5049
5050
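  // Some ops, such as smooth_l1_loss_backward below, compute their output
  // shapes with hand-written compute_shape_* helpers rather than by running the
  // op on meta tensors the way most ops above do.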
5051 at::Tensor LazyNativeFunctions::smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
5052
5053 if (force_eager_fallback(at::aten::smooth_l1_loss_backward)) {
5054 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(smooth_l1_loss_backward)>::call(
5055 grad_output,
5056 self,
5057 target,
5058 reduction,
5059 beta
5060 );
5061 }
5062
5063 TORCH_LAZY_FN_COUNTER("lazy::");
5064 auto common_device = torch::lazy::GetBackendDevice(grad_output, self, target);
5065 TORCH_INTERNAL_ASSERT(common_device);
5066
5067 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
5068 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5069 LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
5070 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SmoothL1LossBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), reduction, beta);
5071 if (!node) {
5072
5073 auto shapes = torch::lazy::compute_shape_smooth_l1_loss_backward(grad_output, self, target, reduction, beta);
5074 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5075 if(torch::lazy::symbolicShapeEnabled()){
5076 std::vector<torch::jit::IValue> inputs = { grad_output, self, target, reduction, beta };
5077 const char* schema_str = "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor";
5078 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5079 }
5080
5081 node = torch::lazy::MakeNode<SmoothL1LossBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), reduction, beta, std::move(shapes));
5082 CacheNode(node);
5083 }
5084
5085 auto result = torch::lazy::CreateAtenFromLtcTensor(
5086 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5087 return result;
5088 }
5089
5090
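  // Scalar arguments (e.g. beta and threshold in softplus below) are wrapped as
  // IR values via GetIrValueForScalarFromCodegen and passed to ReuseNode/MakeNode
  // alongside the tensor operands.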
5091 at::Tensor LazyNativeFunctions::softplus(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
5092
5093 if (force_eager_fallback(at::aten::softplus)) {
5094 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(softplus)>::call(
5095 self,
5096 beta,
5097 threshold
5098 );
5099 }
5100
5101 TORCH_LAZY_FN_COUNTER("lazy::");
5102 auto common_device = torch::lazy::GetBackendDevice(self);
5103 TORCH_INTERNAL_ASSERT(common_device);
5104
5105 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5106 auto node_beta = torch::lazy::LazyGraphExecutor::Get()->
5107 GetIrValueForScalarFromCodegen(beta, *common_device);
5108 auto node_threshold = torch::lazy::LazyGraphExecutor::Get()->
5109 GetIrValueForScalarFromCodegen(threshold, *common_device);
5110 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Softplus>(lazy_self->GetIrValue(), node_beta, node_threshold);
5111 if (!node) {
5112 auto self_meta = to_meta(self);
5113 auto out_meta = at::meta::softplus(self_meta, beta, threshold);
5114
5115          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5116 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5117 if(torch::lazy::symbolicShapeEnabled()){
5118 std::vector<torch::jit::IValue> inputs = { self, beta, threshold };
5119 const char* schema_str = "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor";
5120 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5121 }
5122
5123 node = torch::lazy::MakeNode<Softplus>(lazy_self->GetIrValue(), node_beta, node_threshold, std::move(shapes));
5124 CacheNode(node);
5125 }
5126
5127 auto result = torch::lazy::CreateAtenFromLtcTensor(
5128 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5129 return result;
5130 }
5131
5132
5133 at::Tensor LazyNativeFunctions::softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
5134
5135 if (force_eager_fallback(at::aten::softplus_backward)) {
5136 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(softplus_backward)>::call(
5137 grad_output,
5138 self,
5139 beta,
5140 threshold
5141 );
5142 }
5143
5144 TORCH_LAZY_FN_COUNTER("lazy::");
5145 auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
5146 TORCH_INTERNAL_ASSERT(common_device);
5147
5148 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
5149 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5150 auto node_beta = torch::lazy::LazyGraphExecutor::Get()->
5151 GetIrValueForScalarFromCodegen(beta, *common_device);
5152 auto node_threshold = torch::lazy::LazyGraphExecutor::Get()->
5153 GetIrValueForScalarFromCodegen(threshold, *common_device);
5154 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SoftplusBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), node_beta, node_threshold);
5155 if (!node) {
5156 auto grad_output_meta = to_meta(grad_output);
5157 auto self_meta = to_meta(self);
5158 auto out_meta = at::meta::softplus_backward(grad_output_meta, self_meta, beta, threshold);
5159
5160          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5161 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5162 if(torch::lazy::symbolicShapeEnabled()){
5163 std::vector<torch::jit::IValue> inputs = { grad_output, self, beta, threshold };
5164 const char* schema_str = "aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor";
5165 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5166 }
5167
5168 node = torch::lazy::MakeNode<SoftplusBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), node_beta, node_threshold, std::move(shapes));
5169 CacheNode(node);
5170 }
5171
5172 auto result = torch::lazy::CreateAtenFromLtcTensor(
5173 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5174 return result;
5175 }
5176
5177
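  // Multi-output ops such as sort and topk infer one Shape per output, expose
  // each output as torch::lazy::Value(node, i), and pack the results into an
  // at::Tensor tuple via TupleAtenFromLtcTensors.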
5178 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::sort(const at::Tensor & self, int64_t dim, bool descending) {
5179
5180 if (force_eager_fallback(at::aten::sort)) {
5181 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sort)>::call(
5182 self,
5183 dim,
5184 descending
5185 );
5186 }
5187
5188 TORCH_LAZY_FN_COUNTER("lazy::");
5189 auto common_device = torch::lazy::GetBackendDevice(self);
5190 TORCH_INTERNAL_ASSERT(common_device);
5191
5192 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5193 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Sort>(lazy_self->GetIrValue(), dim, descending);
5194 if (!node) {
5195
5196 auto shapes = torch::lazy::compute_shape_sort(self, dim, descending);
5197 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
5198 if(torch::lazy::symbolicShapeEnabled()){
5199 std::vector<torch::jit::IValue> inputs = { self, dim, descending };
5200 const char* schema_str = "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)";
5201 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5202 }
5203
5204 node = torch::lazy::MakeNode<Sort>(lazy_self->GetIrValue(), dim, descending, std::move(shapes));
5205 CacheNode(node);
5206 }
5207
5208 std::vector<LazyTensorPtr> lazy_tensors;
5209 for (int i = 0; i < 2; i++) {
5210 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
5211 }
5212 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
5213 return result;
5214 }
5215
5216
5217 at::Tensor LazyNativeFunctions::sqrt(const at::Tensor & self) {
5218
5219 if (force_eager_fallback(at::aten::sqrt)) {
5220 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sqrt)>::call(
5221 self
5222 );
5223 }
5224
5225 TORCH_LAZY_FN_COUNTER("lazy::");
5226 auto common_device = torch::lazy::GetBackendDevice(self);
5227 TORCH_INTERNAL_ASSERT(common_device);
5228
5229 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5230 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Sqrt>(lazy_self->GetIrValue());
5231 if (!node) {
5232 auto self_meta = to_meta(self);
5233 auto out_meta = at::meta::sqrt(self_meta);
5234
5235          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5236 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5237 if(torch::lazy::symbolicShapeEnabled()){
5238 std::vector<torch::jit::IValue> inputs = { self };
5239 const char* schema_str = "aten::sqrt(Tensor self) -> Tensor";
5240 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5241 }
5242
5243 node = torch::lazy::MakeNode<Sqrt>(lazy_self->GetIrValue(), std::move(shapes));
5244 CacheNode(node);
5245 }
5246
5247 auto result = torch::lazy::CreateAtenFromLtcTensor(
5248 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5249 return result;
5250 }
5251
5252
5253 at::Tensor LazyNativeFunctions::squeeze_copy(const at::Tensor & self) {
5254
5255 if (force_eager_fallback(at::aten::squeeze_copy)) {
5256 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(squeeze_copy)>::call(
5257 self
5258 );
5259 }
5260
5261 TORCH_LAZY_FN_COUNTER("lazy::");
5262 auto common_device = torch::lazy::GetBackendDevice(self);
5263 TORCH_INTERNAL_ASSERT(common_device);
5264
5265 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5266 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SqueezeCopy>(lazy_self->GetIrValue());
5267 if (!node) {
5268 auto self_meta = to_meta(self);
5269 auto out_meta = at::compositeexplicitautogradnonfunctional::squeeze_copy(self_meta);
5270
5271          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5272 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5273 if(torch::lazy::symbolicShapeEnabled()){
5274 std::vector<torch::jit::IValue> inputs = { self };
5275 const char* schema_str = "aten::squeeze_copy(Tensor self) -> Tensor";
5276 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5277 }
5278
5279 node = torch::lazy::MakeNode<SqueezeCopy>(lazy_self->GetIrValue(), std::move(shapes));
5280 CacheNode(node);
5281 }
5282
5283 auto result = torch::lazy::CreateAtenFromLtcTensor(
5284 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5285 return result;
5286 }
5287
5288
5289 at::Tensor LazyNativeFunctions::squeeze_copy(const at::Tensor & self, int64_t dim) {
5290
5291 if (force_eager_fallback(at::aten::squeeze_copy)) {
5292 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(squeeze_copy, dim)>::call(
5293 self,
5294 dim
5295 );
5296 }
5297
5298 TORCH_LAZY_FN_COUNTER("lazy::");
5299 auto common_device = torch::lazy::GetBackendDevice(self);
5300 TORCH_INTERNAL_ASSERT(common_device);
5301
5302 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5303 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SqueezeCopyDim>(lazy_self->GetIrValue(), dim);
5304 if (!node) {
5305 auto self_meta = to_meta(self);
5306 auto out_meta = at::compositeexplicitautogradnonfunctional::squeeze_copy(self_meta, dim);
5307
5308          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5309 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5310 if(torch::lazy::symbolicShapeEnabled()){
5311 std::vector<torch::jit::IValue> inputs = { self, dim };
5312 const char* schema_str = "aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor";
5313 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5314 }
5315
5316 node = torch::lazy::MakeNode<SqueezeCopyDim>(lazy_self->GetIrValue(), dim, std::move(shapes));
5317 CacheNode(node);
5318 }
5319
5320 auto result = torch::lazy::CreateAtenFromLtcTensor(
5321 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5322 return result;
5323 }
5324
5325
5326 at::Tensor LazyNativeFunctions::squeeze_copy(const at::Tensor & self, at::IntArrayRef dim) {
5327
5328 if (force_eager_fallback(at::aten::squeeze_copy)) {
5329 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(squeeze_copy, dims)>::call(
5330 self,
5331 dim
5332 );
5333 }
5334
5335 TORCH_LAZY_FN_COUNTER("lazy::");
5336 auto common_device = torch::lazy::GetBackendDevice(self);
5337 TORCH_INTERNAL_ASSERT(common_device);
5338
5339 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5340 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SqueezeCopyDims>(lazy_self->GetIrValue(), std::vector<int64_t>(dim.begin(), dim.end()));
5341 if (!node) {
5342 auto self_meta = to_meta(self);
5343 auto out_meta = at::compositeexplicitautogradnonfunctional::squeeze_copy(self_meta, dim);
5344
5345          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5346 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5347 if(torch::lazy::symbolicShapeEnabled()){
5348 std::vector<torch::jit::IValue> inputs = { self, dim };
5349 const char* schema_str = "aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor";
5350 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5351 }
5352
5353 node = torch::lazy::MakeNode<SqueezeCopyDims>(lazy_self->GetIrValue(), std::vector<int64_t>(dim.begin(), dim.end()), std::move(shapes));
5354 CacheNode(node);
5355 }
5356
5357 auto result = torch::lazy::CreateAtenFromLtcTensor(
5358 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5359 return result;
5360 }
5361
5362
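  // TensorList inputs are converted once with GetTensorList and handed to the
  // IR node as a single operand list, rather than being wrapped per-tensor.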
5363 at::Tensor LazyNativeFunctions::stack(at::TensorList tensors, int64_t dim) {
5364
5365 if (force_eager_fallback(at::aten::stack)) {
5366 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(stack)>::call(
5367 tensors,
5368 dim
5369 );
5370 }
5371
5372 TORCH_LAZY_FN_COUNTER("lazy::");
5373 auto common_device = torch::lazy::GetBackendDevice(tensors);
5374 TORCH_INTERNAL_ASSERT(common_device);
5375
5376 auto lazy_tensors_tensorlist = torch::lazy::GetTensorList(tensors);
5377 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Stack>(lazy_tensors_tensorlist, dim);
5378 if (!node) {
5379
5380 auto shapes = torch::lazy::compute_shape_stack(tensors, dim);
5381 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5382 if(torch::lazy::symbolicShapeEnabled()){
5383 std::vector<torch::jit::IValue> inputs = { tensors, dim };
5384 const char* schema_str = "aten::stack(Tensor[] tensors, int dim=0) -> Tensor";
5385 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5386 }
5387
5388 node = torch::lazy::MakeNode<Stack>(lazy_tensors_tensorlist, dim, std::move(shapes));
5389 CacheNode(node);
5390 }
5391
5392 auto result = torch::lazy::CreateAtenFromLtcTensor(
5393 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5394 return result;
5395 }
5396
5397
5398 at::Tensor LazyNativeFunctions::std(const at::Tensor & self, bool unbiased) {
5399
5400 if (force_eager_fallback(at::aten::std)) {
5401 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(std)>::call(
5402 self,
5403 unbiased
5404 );
5405 }
5406
5407 TORCH_LAZY_FN_COUNTER("lazy::");
5408 auto common_device = torch::lazy::GetBackendDevice(self);
5409 TORCH_INTERNAL_ASSERT(common_device);
5410
5411 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5412 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Std>(lazy_self->GetIrValue(), unbiased);
5413 if (!node) {
5414
5415 auto shapes = torch::lazy::compute_shape_std(self, unbiased);
5416 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5417 if(torch::lazy::symbolicShapeEnabled()){
5418 std::vector<torch::jit::IValue> inputs = { self, unbiased };
5419 const char* schema_str = "aten::std(Tensor self, bool unbiased=True) -> Tensor";
5420 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5421 }
5422
5423 node = torch::lazy::MakeNode<Std>(lazy_self->GetIrValue(), unbiased, std::move(shapes));
5424 CacheNode(node);
5425 }
5426
5427 auto result = torch::lazy::CreateAtenFromLtcTensor(
5428 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5429 return result;
5430 }
5431
5432
5433 at::Tensor LazyNativeFunctions::std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
5434
5435 if (force_eager_fallback(at::aten::std)) {
5436 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(std, dim)>::call(
5437 self,
5438 dim,
5439 unbiased,
5440 keepdim
5441 );
5442 }
5443
5444 TORCH_LAZY_FN_COUNTER("lazy::");
5445 auto common_device = torch::lazy::GetBackendDevice(self);
5446 TORCH_INTERNAL_ASSERT(common_device);
5447
5448 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5449 torch::lazy::NodePtr node = torch::lazy::ReuseNode<StdDim>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), unbiased, keepdim);
5450 if (!node) {
5451
5452 auto shapes = torch::lazy::compute_shape_std(self, dim, unbiased, keepdim);
5453 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5454 if(torch::lazy::symbolicShapeEnabled()){
5455 std::vector<torch::jit::IValue> inputs = { self, dim, unbiased, keepdim };
5456 const char* schema_str = "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor";
5457 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5458 }
5459
5460 node = torch::lazy::MakeNode<StdDim>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), unbiased, keepdim, std::move(shapes));
5461 CacheNode(node);
5462 }
5463
5464 auto result = torch::lazy::CreateAtenFromLtcTensor(
5465 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5466 return result;
5467 }
5468
5469
5470 at::Tensor LazyNativeFunctions::std(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
5471
5472 if (force_eager_fallback(at::aten::std)) {
5473 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(std, correction)>::call(
5474 self,
5475 dim,
5476 correction,
5477 keepdim
5478 );
5479 }
5480
5481 TORCH_LAZY_FN_COUNTER("lazy::");
5482 auto common_device = torch::lazy::GetBackendDevice(self);
5483 TORCH_INTERNAL_ASSERT(common_device);
5484
5485 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5486 torch::lazy::NodePtr node = torch::lazy::ReuseNode<StdCorrection>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), correction, keepdim);
5487 if (!node) {
5488
5489 auto shapes = torch::lazy::compute_shape_std(self, dim, correction, keepdim);
5490 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5491 if(torch::lazy::symbolicShapeEnabled()){
5492 std::vector<torch::jit::IValue> inputs = { self, dim, correction, keepdim };
5493 const char* schema_str = "aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor";
5494 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5495 }
5496
5497 node = torch::lazy::MakeNode<StdCorrection>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), correction, keepdim, std::move(shapes));
5498 CacheNode(node);
5499 }
5500
5501 auto result = torch::lazy::CreateAtenFromLtcTensor(
5502 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5503 return result;
5504 }
5505
5506
5507 at::Tensor LazyNativeFunctions::sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
5508
5509 if (force_eager_fallback(at::aten::sub)) {
5510 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(sub, Tensor)>::call(
5511 self,
5512 other,
5513 alpha
5514 );
5515 }
5516
5517 TORCH_LAZY_FN_COUNTER("lazy::");
5518 auto common_device = torch::lazy::GetBackendDevice(self, other);
5519 TORCH_INTERNAL_ASSERT(common_device);
5520
5521 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5522 LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
5523 auto node_alpha = torch::lazy::LazyGraphExecutor::Get()->
5524 GetIrValueForScalarFromCodegen(alpha, *common_device);
5525 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SubTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), node_alpha);
5526 if (!node) {
5527 auto self_meta = to_meta(self);
5528 auto other_meta = to_meta(other);
5529 auto out_meta = at::meta::sub(self_meta, other_meta, alpha);
5530
5531          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5532 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5533 if(torch::lazy::symbolicShapeEnabled()){
5534 std::vector<torch::jit::IValue> inputs = { self, other, alpha };
5535 const char* schema_str = "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor";
5536 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5537 }
5538
5539 node = torch::lazy::MakeNode<SubTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), node_alpha, std::move(shapes));
5540 CacheNode(node);
5541 }
5542
5543 auto result = torch::lazy::CreateAtenFromLtcTensor(
5544 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5545 return result;
5546 }
5547
5548
5549 at::Tensor LazyNativeFunctions::sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
5550
5551 if (force_eager_fallback(at::aten::sum)) {
5552 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sum)>::call(
5553 self,
5554 dtype
5555 );
5556 }
5557
5558 TORCH_LAZY_FN_COUNTER("lazy::");
5559 auto common_device = torch::lazy::GetBackendDevice(self);
5560 TORCH_INTERNAL_ASSERT(common_device);
5561
5562 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5563 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Sum>(lazy_self->GetIrValue(), dtype);
5564 if (!node) {
5565
5566 auto shapes = torch::lazy::compute_shape_sum(self, dtype);
5567 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5568 if(torch::lazy::symbolicShapeEnabled()){
5569 std::vector<torch::jit::IValue> inputs = { self, dtype };
5570 const char* schema_str = "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor";
5571 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5572 }
5573
5574 node = torch::lazy::MakeNode<Sum>(lazy_self->GetIrValue(), dtype, std::move(shapes));
5575 CacheNode(node);
5576 }
5577
5578 auto result = torch::lazy::CreateAtenFromLtcTensor(
5579 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5580 return result;
5581 }
5582
5583
5584 at::Tensor LazyNativeFunctions::sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
5585
5586 if (force_eager_fallback(at::aten::sum)) {
5587 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(sum, dim_IntList)>::call(
5588 self,
5589 dim,
5590 keepdim,
5591 dtype
5592 );
5593 }
5594
5595 TORCH_LAZY_FN_COUNTER("lazy::");
5596 auto common_device = torch::lazy::GetBackendDevice(self);
5597 TORCH_INTERNAL_ASSERT(common_device);
5598
5599 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5600 torch::lazy::NodePtr node = torch::lazy::ReuseNode<SumDimIntlist>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), keepdim, dtype);
5601 if (!node) {
5602 auto self_meta = to_meta(self);
5603 auto out_meta = at::meta::sum(self_meta, dim, keepdim, dtype);
5604
5605          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5606 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5607 if(torch::lazy::symbolicShapeEnabled()){
5608 std::vector<torch::jit::IValue> inputs = { self, dim, keepdim, dtype };
5609 const char* schema_str = "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
5610 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5611 }
5612
5613 node = torch::lazy::MakeNode<SumDimIntlist>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), keepdim, dtype, std::move(shapes));
5614 CacheNode(node);
5615 }
5616
5617 auto result = torch::lazy::CreateAtenFromLtcTensor(
5618 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5619 return result;
5620 }
5621
5622
5623 at::Tensor LazyNativeFunctions::t_copy(const at::Tensor & self) {
5624
5625 if (force_eager_fallback(at::aten::t_copy)) {
5626 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(t_copy)>::call(
5627 self
5628 );
5629 }
5630
5631 TORCH_LAZY_FN_COUNTER("lazy::");
5632 auto common_device = torch::lazy::GetBackendDevice(self);
5633 TORCH_INTERNAL_ASSERT(common_device);
5634
5635 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5636 torch::lazy::NodePtr node = torch::lazy::ReuseNode<TCopy>(lazy_self->GetIrValue());
5637 if (!node) {
5638 auto self_meta = to_meta(self);
5639 auto out_meta = at::compositeexplicitautogradnonfunctional::t_copy(self_meta);
5640
5641          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5642 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5643 if(torch::lazy::symbolicShapeEnabled()){
5644 std::vector<torch::jit::IValue> inputs = { self };
5645 const char* schema_str = "aten::t_copy(Tensor self) -> Tensor";
5646 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5647 }
5648
5649 node = torch::lazy::MakeNode<TCopy>(lazy_self->GetIrValue(), std::move(shapes));
5650 CacheNode(node);
5651 }
5652
5653 auto result = torch::lazy::CreateAtenFromLtcTensor(
5654 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5655 return result;
5656 }
5657
5658
5659 at::Tensor LazyNativeFunctions::tanh(const at::Tensor & self) {
5660
5661 if (force_eager_fallback(at::aten::tanh)) {
5662 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(tanh)>::call(
5663 self
5664 );
5665 }
5666
5667 TORCH_LAZY_FN_COUNTER("lazy::");
5668 auto common_device = torch::lazy::GetBackendDevice(self);
5669 TORCH_INTERNAL_ASSERT(common_device);
5670
5671 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5672 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Tanh>(lazy_self->GetIrValue());
5673 if (!node) {
5674 auto self_meta = to_meta(self);
5675 auto out_meta = at::meta::tanh(self_meta);
5676
5677          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5678 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5679 if(torch::lazy::symbolicShapeEnabled()){
5680 std::vector<torch::jit::IValue> inputs = { self };
5681 const char* schema_str = "aten::tanh(Tensor self) -> Tensor";
5682 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5683 }
5684
5685 node = torch::lazy::MakeNode<Tanh>(lazy_self->GetIrValue(), std::move(shapes));
5686 CacheNode(node);
5687 }
5688
5689 auto result = torch::lazy::CreateAtenFromLtcTensor(
5690 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5691 return result;
5692 }
5693
5694
5695 at::Tensor LazyNativeFunctions::tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
5696
5697 if (force_eager_fallback(at::aten::tanh_backward)) {
5698 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(tanh_backward)>::call(
5699 grad_output,
5700 output
5701 );
5702 }
5703
5704 TORCH_LAZY_FN_COUNTER("lazy::");
5705 auto common_device = torch::lazy::GetBackendDevice(grad_output, output);
5706 TORCH_INTERNAL_ASSERT(common_device);
5707
5708 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
5709 LazyTensorPtr lazy_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(output, *common_device);
5710 torch::lazy::NodePtr node = torch::lazy::ReuseNode<TanhBackward>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue());
5711 if (!node) {
5712 auto grad_output_meta = to_meta(grad_output);
5713 auto output_meta = to_meta(output);
5714 auto out_meta = at::meta::tanh_backward(grad_output_meta, output_meta);
5715
5716          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5717 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5718 if(torch::lazy::symbolicShapeEnabled()){
5719 std::vector<torch::jit::IValue> inputs = { grad_output, output };
5720 const char* schema_str = "aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor";
5721 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5722 }
5723
5724 node = torch::lazy::MakeNode<TanhBackward>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), std::move(shapes));
5725 CacheNode(node);
5726 }
5727
5728 auto result = torch::lazy::CreateAtenFromLtcTensor(
5729 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5730 return result;
5731 }
5732
5733
5734 at::Tensor LazyNativeFunctions::threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
5735
5736 if (force_eager_fallback(at::aten::threshold)) {
5737 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(threshold)>::call(
5738 self,
5739 threshold,
5740 value
5741 );
5742 }
5743
5744 TORCH_LAZY_FN_COUNTER("lazy::");
5745 auto common_device = torch::lazy::GetBackendDevice(self);
5746 TORCH_INTERNAL_ASSERT(common_device);
5747
5748 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5749 auto node_threshold = torch::lazy::LazyGraphExecutor::Get()->
5750 GetIrValueForScalarFromCodegen(threshold, *common_device);
5751 auto node_value = torch::lazy::LazyGraphExecutor::Get()->
5752 GetIrValueForScalarFromCodegen(value, *common_device);
5753 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Threshold>(lazy_self->GetIrValue(), node_threshold, node_value);
5754 if (!node) {
5755 auto self_meta = to_meta(self);
5756 auto out_meta = at::meta::threshold(self_meta, threshold, value);
5757
5758          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5759 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5760 if(torch::lazy::symbolicShapeEnabled()){
5761 std::vector<torch::jit::IValue> inputs = { self, threshold, value };
5762 const char* schema_str = "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor";
5763 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5764 }
5765
5766 node = torch::lazy::MakeNode<Threshold>(lazy_self->GetIrValue(), node_threshold, node_value, std::move(shapes));
5767 CacheNode(node);
5768 }
5769
5770 auto result = torch::lazy::CreateAtenFromLtcTensor(
5771 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5772 return result;
5773 }
5774
5775
5776 at::Tensor LazyNativeFunctions::threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
5777
5778 if (force_eager_fallback(at::aten::threshold_backward)) {
5779 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(threshold_backward)>::call(
5780 grad_output,
5781 self,
5782 threshold
5783 );
5784 }
5785
5786 TORCH_LAZY_FN_COUNTER("lazy::");
5787 auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
5788 TORCH_INTERNAL_ASSERT(common_device);
5789
5790 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
5791 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5792 auto node_threshold = torch::lazy::LazyGraphExecutor::Get()->
5793 GetIrValueForScalarFromCodegen(threshold, *common_device);
5794 torch::lazy::NodePtr node = torch::lazy::ReuseNode<ThresholdBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), node_threshold);
5795 if (!node) {
5796 auto grad_output_meta = to_meta(grad_output);
5797 auto self_meta = to_meta(self);
5798 auto out_meta = at::meta::threshold_backward(grad_output_meta, self_meta, threshold);
5799
5800          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5801 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5802 if(torch::lazy::symbolicShapeEnabled()){
5803 std::vector<torch::jit::IValue> inputs = { grad_output, self, threshold };
5804 const char* schema_str = "aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor";
5805 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5806 }
5807
5808 node = torch::lazy::MakeNode<ThresholdBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), node_threshold, std::move(shapes));
5809 CacheNode(node);
5810 }
5811
5812 auto result = torch::lazy::CreateAtenFromLtcTensor(
5813 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5814 return result;
5815 }
5816
5817
5818 ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
5819
5820 if (force_eager_fallback(at::aten::topk)) {
5821 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(topk)>::call(
5822 self,
5823 k,
5824 dim,
5825 largest,
5826 sorted
5827 );
5828 }
5829
5830 TORCH_LAZY_FN_COUNTER("lazy::");
5831 auto common_device = torch::lazy::GetBackendDevice(self);
5832 TORCH_INTERNAL_ASSERT(common_device);
5833
5834 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5835 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Topk>(lazy_self->GetIrValue(), k, dim, largest, sorted);
5836 if (!node) {
5837 auto self_meta = to_meta(self);
5838 auto out_meta = at::meta::topk(self_meta, k, dim, largest, sorted);
5839 std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(std::get<0>(out_meta).scalar_type(), std::get<0>(out_meta).sizes().vec()),torch::lazy::Shape(std::get<1>(out_meta).scalar_type(), std::get<1>(out_meta).sizes().vec())};
5840 TORCH_INTERNAL_ASSERT(shapes.size() == 2);
5841 if(torch::lazy::symbolicShapeEnabled()){
5842 std::vector<torch::jit::IValue> inputs = { self, k, dim, largest, sorted };
5843 const char* schema_str = "aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)";
5844 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5845 }
5846
5847 node = torch::lazy::MakeNode<Topk>(lazy_self->GetIrValue(), k, dim, largest, sorted, std::move(shapes));
5848 CacheNode(node);
5849 }
5850
5851 std::vector<LazyTensorPtr> lazy_tensors;
5852 for (int i = 0; i < 2; i++) {
5853 lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
5854 }
5855 auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
5856 return result;
5857 }
5858
5859
5860 at::Tensor LazyNativeFunctions::trace(const at::Tensor & self) {
5861
5862 if (force_eager_fallback(at::aten::trace)) {
5863 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(trace)>::call(
5864 self
5865 );
5866 }
5867
5868 TORCH_LAZY_FN_COUNTER("lazy::");
5869 auto common_device = torch::lazy::GetBackendDevice(self);
5870 TORCH_INTERNAL_ASSERT(common_device);
5871
5872 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5873 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Trace>(lazy_self->GetIrValue());
5874 if (!node) {
5875
5876 auto shapes = torch::lazy::compute_shape_trace(self);
5877 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5878 if(torch::lazy::symbolicShapeEnabled()){
5879 std::vector<torch::jit::IValue> inputs = { self };
5880 const char* schema_str = "aten::trace(Tensor self) -> Tensor";
5881 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5882 }
5883
5884 node = torch::lazy::MakeNode<Trace>(lazy_self->GetIrValue(), std::move(shapes));
5885 CacheNode(node);
5886 }
5887
5888 auto result = torch::lazy::CreateAtenFromLtcTensor(
5889 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5890 return result;
5891 }
5892
5893
5894 at::Tensor LazyNativeFunctions::transpose_copy(const at::Tensor & self, int64_t dim0, int64_t dim1) {
5895
5896 if (force_eager_fallback(at::aten::transpose_copy)) {
5897 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(transpose_copy, int)>::call(
5898 self,
5899 dim0,
5900 dim1
5901 );
5902 }
5903
5904 TORCH_LAZY_FN_COUNTER("lazy::");
5905 auto common_device = torch::lazy::GetBackendDevice(self);
5906 TORCH_INTERNAL_ASSERT(common_device);
5907
5908 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5909 torch::lazy::NodePtr node = torch::lazy::ReuseNode<TransposeCopyInt>(lazy_self->GetIrValue(), dim0, dim1);
5910 if (!node) {
5911 auto self_meta = to_meta(self);
5912 auto out_meta = at::compositeexplicitautogradnonfunctional::transpose_copy(self_meta, dim0, dim1);
5913
5914          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5915 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5916 if(torch::lazy::symbolicShapeEnabled()){
5917 std::vector<torch::jit::IValue> inputs = { self, dim0, dim1 };
5918 const char* schema_str = "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor";
5919 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5920 }
5921
5922 node = torch::lazy::MakeNode<TransposeCopyInt>(lazy_self->GetIrValue(), dim0, dim1, std::move(shapes));
5923 CacheNode(node);
5924 }
5925
5926 auto result = torch::lazy::CreateAtenFromLtcTensor(
5927 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5928 return result;
5929 }
5930
5931
5932 at::Tensor LazyNativeFunctions::tril(const at::Tensor & self, int64_t diagonal) {
5933
5934 if (force_eager_fallback(at::aten::tril)) {
5935 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(tril)>::call(
5936 self,
5937 diagonal
5938 );
5939 }
5940
5941 TORCH_LAZY_FN_COUNTER("lazy::");
5942 auto common_device = torch::lazy::GetBackendDevice(self);
5943 TORCH_INTERNAL_ASSERT(common_device);
5944
5945 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5946 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Tril>(lazy_self->GetIrValue(), diagonal);
5947 if (!node) {
5948 auto self_meta = to_meta(self);
5949 auto out_meta = at::meta::tril(self_meta, diagonal);
5950
5951          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5952 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5953 if(torch::lazy::symbolicShapeEnabled()){
5954 std::vector<torch::jit::IValue> inputs = { self, diagonal };
5955 const char* schema_str = "aten::tril(Tensor self, int diagonal=0) -> Tensor";
5956 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5957 }
5958
5959 node = torch::lazy::MakeNode<Tril>(lazy_self->GetIrValue(), diagonal, std::move(shapes));
5960 CacheNode(node);
5961 }
5962
5963 auto result = torch::lazy::CreateAtenFromLtcTensor(
5964 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
5965 return result;
5966 }
5967
5968
5969 at::Tensor LazyNativeFunctions::triu(const at::Tensor & self, int64_t diagonal) {
5970
5971 if (force_eager_fallback(at::aten::triu)) {
5972 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(triu)>::call(
5973 self,
5974 diagonal
5975 );
5976 }
5977
5978 TORCH_LAZY_FN_COUNTER("lazy::");
5979 auto common_device = torch::lazy::GetBackendDevice(self);
5980 TORCH_INTERNAL_ASSERT(common_device);
5981
5982 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
5983 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Triu>(lazy_self->GetIrValue(), diagonal);
5984 if (!node) {
5985 auto self_meta = to_meta(self);
5986 auto out_meta = at::meta::triu(self_meta, diagonal);
5987
5988          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
5989 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
5990 if(torch::lazy::symbolicShapeEnabled()){
5991 std::vector<torch::jit::IValue> inputs = { self, diagonal };
5992 const char* schema_str = "aten::triu(Tensor self, int diagonal=0) -> Tensor";
5993 applySymbolicShapesOnLT(schema_str, inputs, shapes);
5994 }
5995
5996 node = torch::lazy::MakeNode<Triu>(lazy_self->GetIrValue(), diagonal, std::move(shapes));
5997 CacheNode(node);
5998 }
5999
6000 auto result = torch::lazy::CreateAtenFromLtcTensor(
6001 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
6002 return result;
6003 }
6004
6005
6006 at::Tensor LazyNativeFunctions::trunc(const at::Tensor & self) {
6007
6008 if (force_eager_fallback(at::aten::trunc)) {
6009 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(trunc)>::call(
6010 self
6011 );
6012 }
6013
6014 TORCH_LAZY_FN_COUNTER("lazy::");
6015 auto common_device = torch::lazy::GetBackendDevice(self);
6016 TORCH_INTERNAL_ASSERT(common_device);
6017
6018 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
6019 torch::lazy::NodePtr node = torch::lazy::ReuseNode<Trunc>(lazy_self->GetIrValue());
6020 if (!node) {
6021 auto self_meta = to_meta(self);
6022 auto out_meta = at::meta::trunc(self_meta);
6023
6024          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
6025 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
6026 if(torch::lazy::symbolicShapeEnabled()){
6027 std::vector<torch::jit::IValue> inputs = { self };
6028 const char* schema_str = "aten::trunc(Tensor self) -> Tensor";
6029 applySymbolicShapesOnLT(schema_str, inputs, shapes);
6030 }
6031
6032 node = torch::lazy::MakeNode<Trunc>(lazy_self->GetIrValue(), std::move(shapes));
6033 CacheNode(node);
6034 }
6035
6036 auto result = torch::lazy::CreateAtenFromLtcTensor(
6037 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
6038 return result;
6039 }
6040
6041
6042 at::Tensor LazyNativeFunctions::unfold_copy(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
6043
6044 if (force_eager_fallback(at::aten::unfold_copy)) {
6045 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(unfold_copy)>::call(
6046 self,
6047 dimension,
6048 size,
6049 step
6050 );
6051 }
6052
6053 TORCH_LAZY_FN_COUNTER("lazy::");
6054 auto common_device = torch::lazy::GetBackendDevice(self);
6055 TORCH_INTERNAL_ASSERT(common_device);
6056
6057 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
6058 torch::lazy::NodePtr node = torch::lazy::ReuseNode<UnfoldCopy>(lazy_self->GetIrValue(), dimension, size, step);
6059 if (!node) {
6060 auto self_meta = to_meta(self);
6061 auto out_meta = at::compositeexplicitautogradnonfunctional::unfold_copy(self_meta, dimension, size, step);
6062
6063          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
6064 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
6065 if(torch::lazy::symbolicShapeEnabled()){
6066 std::vector<torch::jit::IValue> inputs = { self, dimension, size, step };
6067 const char* schema_str = "aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor";
6068 applySymbolicShapesOnLT(schema_str, inputs, shapes);
6069 }
6070
6071 node = torch::lazy::MakeNode<UnfoldCopy>(lazy_self->GetIrValue(), dimension, size, step, std::move(shapes));
6072 CacheNode(node);
6073 }
6074
6075 auto result = torch::lazy::CreateAtenFromLtcTensor(
6076 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
6077 return result;
6078 }
6079
6080
6081 at::Tensor LazyNativeFunctions::unsqueeze_copy(const at::Tensor & self, int64_t dim) {
6082
6083 if (force_eager_fallback(at::aten::unsqueeze_copy)) {
6084 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(unsqueeze_copy)>::call(
6085 self,
6086 dim
6087 );
6088 }
6089
6090 TORCH_LAZY_FN_COUNTER("lazy::");
6091 auto common_device = torch::lazy::GetBackendDevice(self);
6092 TORCH_INTERNAL_ASSERT(common_device);
6093
6094 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
6095 torch::lazy::NodePtr node = torch::lazy::ReuseNode<UnsqueezeCopy>(lazy_self->GetIrValue(), dim);
6096 if (!node) {
6097 auto self_meta = to_meta(self);
6098 auto out_meta = at::compositeexplicitautogradnonfunctional::unsqueeze_copy(self_meta, dim);
6099
6100          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
6101 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
6102 if(torch::lazy::symbolicShapeEnabled()){
6103 std::vector<torch::jit::IValue> inputs = { self, dim };
6104 const char* schema_str = "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor";
6105 applySymbolicShapesOnLT(schema_str, inputs, shapes);
6106 }
6107
6108 node = torch::lazy::MakeNode<UnsqueezeCopy>(lazy_self->GetIrValue(), dim, std::move(shapes));
6109 CacheNode(node);
6110 }
6111
6112 auto result = torch::lazy::CreateAtenFromLtcTensor(
6113 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
6114 return result;
6115 }
6116
6117
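  // For ops whose fallback schema expects SymInt[] sizes (the upsample ops
  // below), the IntArrayRef arguments are converted with c10::fromIntArrayRefSlow
  // before calling the eager fallback; the IR node itself stores plain
  // std::vector<int64_t> copies of those sizes.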
6118 at::Tensor LazyNativeFunctions::upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
6119
6120 if (force_eager_fallback(at::aten::upsample_bilinear2d)) {
6121 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(upsample_bilinear2d)>::call(
6122 self,
6123 c10::fromIntArrayRefSlow(output_size),
6124 align_corners,
6125 scales_h,
6126 scales_w
6127 );
6128 }
6129
6130 TORCH_LAZY_FN_COUNTER("lazy::");
6131 auto common_device = torch::lazy::GetBackendDevice(self);
6132 TORCH_INTERNAL_ASSERT(common_device);
6133
6134 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
6135 torch::lazy::NodePtr node = torch::lazy::ReuseNode<UpsampleBilinear2d>(lazy_self->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), align_corners, scales_h, scales_w);
6136 if (!node) {
6137 auto self_meta = to_meta(self);
6138 auto out_meta = at::meta::upsample_bilinear2d(self_meta, output_size, align_corners, scales_h, scales_w);
6139
6140          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
6141 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
6142 if(torch::lazy::symbolicShapeEnabled()){
6143 std::vector<torch::jit::IValue> inputs = { self, output_size, align_corners, scales_h, scales_w };
6144 const char* schema_str = "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor";
6145 applySymbolicShapesOnLT(schema_str, inputs, shapes);
6146 }
6147
6148 node = torch::lazy::MakeNode<UpsampleBilinear2d>(lazy_self->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), align_corners, scales_h, scales_w, std::move(shapes));
6149 CacheNode(node);
6150 }
6151
6152 auto result = torch::lazy::CreateAtenFromLtcTensor(
6153 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
6154 return result;
6155 }
6156
6157
6158 at::Tensor LazyNativeFunctions::upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
6159
6160 if (force_eager_fallback(at::aten::upsample_bilinear2d_backward)) {
6161 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(upsample_bilinear2d_backward)>::call(
6162 grad_output,
6163 c10::fromIntArrayRefSlow(output_size),
6164 c10::fromIntArrayRefSlow(input_size),
6165 align_corners,
6166 scales_h,
6167 scales_w
6168 );
6169 }
6170
6171 TORCH_LAZY_FN_COUNTER("lazy::");
6172 auto common_device = torch::lazy::GetBackendDevice(grad_output);
6173 TORCH_INTERNAL_ASSERT(common_device);
6174
6175 LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
6176 torch::lazy::NodePtr node = torch::lazy::ReuseNode<UpsampleBilinear2dBackward>(lazy_grad_output->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), std::vector<int64_t>(input_size.begin(), input_size.end()), align_corners, scales_h, scales_w);
6177 if (!node) {
6178 auto grad_output_meta = to_meta(grad_output);
6179 auto out_meta = at::meta::upsample_bilinear2d_backward(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w);
6180
6181          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
6182 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
6183 if(torch::lazy::symbolicShapeEnabled()){
6184 std::vector<torch::jit::IValue> inputs = { grad_output, output_size, input_size, align_corners, scales_h, scales_w };
6185 const char* schema_str = "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor";
6186 applySymbolicShapesOnLT(schema_str, inputs, shapes);
6187 }
6188
6189 node = torch::lazy::MakeNode<UpsampleBilinear2dBackward>(lazy_grad_output->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), std::vector<int64_t>(input_size.begin(), input_size.end()), align_corners, scales_h, scales_w, std::move(shapes));
6190 CacheNode(node);
6191 }
6192
6193 auto result = torch::lazy::CreateAtenFromLtcTensor(
6194 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
6195 return result;
6196 }
6197
6198
6199 at::Tensor LazyNativeFunctions::upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
6200
6201 if (force_eager_fallback(at::aten::upsample_nearest2d)) {
6202 return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(upsample_nearest2d)>::call(
6203 self,
6204 c10::fromIntArrayRefSlow(output_size),
6205 scales_h,
6206 scales_w
6207 );
6208 }
6209
6210 TORCH_LAZY_FN_COUNTER("lazy::");
6211 auto common_device = torch::lazy::GetBackendDevice(self);
6212 TORCH_INTERNAL_ASSERT(common_device);
6213
6214 LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
6215 torch::lazy::NodePtr node = torch::lazy::ReuseNode<UpsampleNearest2d>(lazy_self->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), scales_h, scales_w);
6216 if (!node) {
6217 auto self_meta = to_meta(self);
6218 auto out_meta = at::meta::upsample_nearest2d(self_meta, output_size, scales_h, scales_w);
6219
6220          std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
6221 TORCH_INTERNAL_ASSERT(shapes.size() == 1);
6222 if(torch::lazy::symbolicShapeEnabled()){
6223 std::vector<torch::jit::IValue> inputs = { self, output_size, scales_h, scales_w };
6224 const char* schema_str = "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor";
6225 applySymbolicShapesOnLT(schema_str, inputs, shapes);
6226 }
6227
6228 node = torch::lazy::MakeNode<UpsampleNearest2d>(lazy_self->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), scales_h, scales_w, std::move(shapes));
6229 CacheNode(node);
6230 }
6231
6232 auto result = torch::lazy::CreateAtenFromLtcTensor(
6233 torch::lazy::LazyTensor::Create(std::move(node), *common_device));
6234 return result;
6235 }
6236
6237
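    // The backward op traces against grad_output only; its output shape is
    // recovered by running the upsample_nearest2d_backward meta kernel on a
    // meta copy of grad_output, so no hand-written shape function is needed.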
    at::Tensor LazyNativeFunctions::upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {

        if (force_eager_fallback(at::aten::upsample_nearest2d_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(upsample_nearest2d_backward)>::call(
                grad_output,
                c10::fromIntArrayRefSlow(output_size),
                c10::fromIntArrayRefSlow(input_size),
                scales_h,
                scales_w
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(grad_output);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<UpsampleNearest2dBackward>(lazy_grad_output->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), std::vector<int64_t>(input_size.begin(), input_size.end()), scales_h, scales_w);
        if (!node) {
            auto grad_output_meta = to_meta(grad_output);
            auto out_meta = at::meta::upsample_nearest2d_backward(grad_output_meta, output_size, input_size, scales_h, scales_w);

            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, output_size, input_size, scales_h, scales_w };
                const char* schema_str = "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }

            node = torch::lazy::MakeNode<UpsampleNearest2dBackward>(lazy_grad_output->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), std::vector<int64_t>(input_size.begin(), input_size.end()), scales_h, scales_w, std::move(shapes));
            CacheNode(node);
        }

        auto result = torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

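    // view_copy carries SymInt sizes: the IR node stores them through
    // GetSymIntArrayRefValue, and shape inference goes through the
    // CompositeExplicitAutogradNonFunctional kernel rather than at::meta.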
    at::Tensor LazyNativeFunctions::view_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size) {

        if (force_eager_fallback(at::aten::view_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(view_copy)>::call(
                self,
                size
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<ViewCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size));
        if (!node) {
            auto self_meta = to_meta(self);
            auto out_meta = at::compositeexplicitautogradnonfunctional::view_copy_symint(self_meta, size);

            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, size };
                const char* schema_str = "aten::view_copy(Tensor self, SymInt[] size) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }

            node = torch::lazy::MakeNode<ViewCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), std::move(shapes));
            CacheNode(node);
        }

        auto result = torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

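    // dtype overload of view_copy (the functional counterpart of
    // Tensor.view(dtype)); ATEN_OP2(view_copy, dtype) selects the
    // "view_copy.dtype" overload for the eager fallback.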
    at::Tensor LazyNativeFunctions::view_copy(const at::Tensor & self, at::ScalarType dtype) {

        if (force_eager_fallback(at::aten::view_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(view_copy, dtype)>::call(
                self,
                dtype
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<ViewCopyDtype>(lazy_self->GetIrValue(), dtype);
        if (!node) {
            auto self_meta = to_meta(self);
            auto out_meta = at::compositeexplicitautogradnonfunctional::view_copy(self_meta, dtype);

            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dtype };
                const char* schema_str = "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }

            node = torch::lazy::MakeNode<ViewCopyDtype>(lazy_self->GetIrValue(), dtype, std::move(shapes));
            CacheNode(node);
        }

        auto result = torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

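    // zero is shape-preserving, so its output shape comes from the
    // hand-written torch::lazy::compute_shape_zero helper instead of a
    // meta kernel.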
    at::Tensor LazyNativeFunctions::zero(const at::Tensor & self) {

        if (force_eager_fallback(at::aten::zero)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(zero)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Zero>(lazy_self->GetIrValue());
        if (!node) {
            auto shapes = torch::lazy::compute_shape_zero(self);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::zero(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }

            node = torch::lazy::MakeNode<Zero>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        auto result = torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

} // namespace lazy
} // namespace torch